Lines matching: reset, pin, assert, time, ms

12  * See the COPYING file in the top-level directory.
24 #include "qemu/config-file.h"
25 #include "qemu/error-report.h"
34 #include "system/accel-blocker.h"
35 #include "accel/accel-ops.h"
41 #include "qemu/main-loop.h"
45 #include "qapi/qapi-types-common.h"
46 #include "qapi/qapi-visit-common.h"
47 #include "system/reset.h"
48 #include "qemu/guest-random.h"
50 #include "kvm-cpus.h"
57 /* This check must be after config-host.h is included */
150 if (rfd->gsi == gsi) { in kvm_resample_fd_remove()
162 rfd->gsi = gsi; in kvm_resample_fd_insert()
163 rfd->resample_event = event; in kvm_resample_fd_insert()
173 if (rfd->gsi == gsi) { in kvm_resample_fd_notify()
174 event_notifier_set(rfd->resample_event); in kvm_resample_fd_notify()
191 unsigned int i, cur = kml->nr_slots_allocated; in kvm_slots_grow()
194 if (nr_slots_new > kvm_state->nr_slots_max) { in kvm_slots_grow()
195 nr_slots_new = kvm_state->nr_slots_max; in kvm_slots_grow()
206 assert(kml->slots); in kvm_slots_grow()
207 slots = g_renew(KVMSlot, kml->slots, nr_slots_new); in kvm_slots_grow()
210 * memslots require fields to be zero-initialized. E.g. pointers, in kvm_slots_grow()
213 memset(&slots[cur], 0x0, sizeof(slots[0]) * (nr_slots_new - cur)); in kvm_slots_grow()
220 kml->slots = slots; in kvm_slots_grow()
221 kml->nr_slots_allocated = nr_slots_new; in kvm_slots_grow()
229 return kvm_slots_grow(kml, kml->nr_slots_allocated * 2); in kvm_slots_double()
236 return s->nr_slots_max; in kvm_get_max_memslots()
246 for (i = 0; i < s->nr_as; i++) { in kvm_get_free_memslots()
247 if (!s->as[i].ml) { in kvm_get_free_memslots()
250 used_slots = MAX(used_slots, s->as[i].ml->nr_slots_used); in kvm_get_free_memslots()
254 return s->nr_slots_max - used_slots; in kvm_get_free_memslots()
263 for (i = 0; i < kml->nr_slots_allocated; i++) { in kvm_get_free_slot()
264 if (kml->slots[i].memory_size == 0) { in kvm_get_free_slot()
265 return &kml->slots[i]; in kvm_get_free_slot()
275 n = kml->nr_slots_allocated; in kvm_get_free_slot()
277 return &kml->slots[n]; in kvm_get_free_slot()
302 for (i = 0; i < kml->nr_slots_allocated; i++) { in kvm_lookup_matching_slot()
303 KVMSlot *mem = &kml->slots[i]; in kvm_lookup_matching_slot()
305 if (start_addr == mem->start_addr && size == mem->memory_size) { in kvm_lookup_matching_slot()
320 hwaddr size = int128_get64(section->size); in kvm_align_section()
324 with sub-page size and unaligned start address. Pad the start in kvm_align_section()
326 aligned = ROUND_UP(section->offset_within_address_space, in kvm_align_section()
328 delta = aligned - section->offset_within_address_space; in kvm_align_section()
334 return (size - delta) & qemu_real_host_page_mask(); in kvm_align_section()
340 KVMMemoryListener *kml = &s->memory_listener; in kvm_physical_memory_addr_from_host()
344 for (i = 0; i < kml->nr_slots_allocated; i++) { in kvm_physical_memory_addr_from_host()
345 KVMSlot *mem = &kml->slots[i]; in kvm_physical_memory_addr_from_host()
347 if (ram >= mem->ram && ram < mem->ram + mem->memory_size) { in kvm_physical_memory_addr_from_host()
348 *phys_addr = mem->start_addr + (ram - mem->ram); in kvm_physical_memory_addr_from_host()
364 mem.slot = slot->slot | (kml->as_id << 16); in kvm_set_user_memory_region()
365 mem.guest_phys_addr = slot->start_addr; in kvm_set_user_memory_region()
366 mem.userspace_addr = (unsigned long)slot->ram; in kvm_set_user_memory_region()
367 mem.flags = slot->flags; in kvm_set_user_memory_region()
368 mem.guest_memfd = slot->guest_memfd; in kvm_set_user_memory_region()
369 mem.guest_memfd_offset = slot->guest_memfd_offset; in kvm_set_user_memory_region()
371 if (slot->memory_size && !new && (mem.flags ^ slot->old_flags) & KVM_MEM_READONLY) { in kvm_set_user_memory_region()
385 mem.memory_size = slot->memory_size; in kvm_set_user_memory_region()
391 slot->old_flags = mem.flags; in kvm_set_user_memory_region()
403 __func__, mem.slot, slot->start_addr, in kvm_set_user_memory_region()
410 __func__, mem.slot, slot->start_addr, in kvm_set_user_memory_region()
421 trace_kvm_park_vcpu(cpu->cpu_index, kvm_arch_vcpu_id(cpu)); in kvm_park_vcpu()
424 vcpu->vcpu_id = kvm_arch_vcpu_id(cpu); in kvm_park_vcpu()
425 vcpu->kvm_fd = cpu->kvm_fd; in kvm_park_vcpu()
426 QLIST_INSERT_HEAD(&kvm_state->kvm_parked_vcpus, vcpu, node); in kvm_park_vcpu()
432 int kvm_fd = -ENOENT; in kvm_unpark_vcpu()
434 QLIST_FOREACH(cpu, &s->kvm_parked_vcpus, node) { in kvm_unpark_vcpu()
435 if (cpu->vcpu_id == vcpu_id) { in kvm_unpark_vcpu()
437 kvm_fd = cpu->kvm_fd; in kvm_unpark_vcpu()
452 QLIST_FOREACH(cpu, &s->kvm_parked_vcpus, node) { in kvm_reset_parked_vcpus()
453 kvm_arch_reset_parked_vcpu(cpu->vcpu_id, cpu->kvm_fd); in kvm_reset_parked_vcpus()
458 * kvm_create_vcpu - Gets a parked KVM vCPU or creates a KVM vCPU
480 cpu->kvm_fd = kvm_fd; in kvm_create_vcpu()
481 cpu->kvm_state = s; in kvm_create_vcpu()
482 if (!s->guest_state_protected) { in kvm_create_vcpu()
483 cpu->vcpu_dirty = true; in kvm_create_vcpu()
485 cpu->dirty_pages = 0; in kvm_create_vcpu()
486 cpu->throttle_us_per_full = 0; in kvm_create_vcpu()
488 trace_kvm_create_vcpu(cpu->cpu_index, vcpu_id, kvm_fd); in kvm_create_vcpu()
511 trace_kvm_destroy_vcpu(cpu->cpu_index, kvm_arch_vcpu_id(cpu)); in do_kvm_destroy_vcpu()
526 if (s->coalesced_mmio_ring == (void *)cpu->kvm_run + PAGE_SIZE) { in do_kvm_destroy_vcpu()
527 s->coalesced_mmio_ring = NULL; in do_kvm_destroy_vcpu()
530 ret = munmap(cpu->kvm_run, mmap_size); in do_kvm_destroy_vcpu()
534 cpu->kvm_run = NULL; in do_kvm_destroy_vcpu()
536 if (cpu->kvm_dirty_gfns) { in do_kvm_destroy_vcpu()
537 ret = munmap(cpu->kvm_dirty_gfns, s->kvm_dirty_ring_bytes); in do_kvm_destroy_vcpu()
541 cpu->kvm_dirty_gfns = NULL; in do_kvm_destroy_vcpu()
563 trace_kvm_init_vcpu(cpu->cpu_index, kvm_arch_vcpu_id(cpu)); in kvm_init_vcpu()
572 error_setg_errno(errp, -ret, in kvm_init_vcpu()
581 error_setg_errno(errp, -mmap_size, in kvm_init_vcpu()
586 cpu->kvm_run = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE, MAP_SHARED, in kvm_init_vcpu()
587 cpu->kvm_fd, 0); in kvm_init_vcpu()
588 if (cpu->kvm_run == MAP_FAILED) { in kvm_init_vcpu()
589 ret = -errno; in kvm_init_vcpu()
596 if (s->coalesced_mmio && !s->coalesced_mmio_ring) { in kvm_init_vcpu()
597 s->coalesced_mmio_ring = in kvm_init_vcpu()
598 (void *)cpu->kvm_run + s->coalesced_mmio * PAGE_SIZE; in kvm_init_vcpu()
601 if (s->kvm_dirty_ring_size) { in kvm_init_vcpu()
603 cpu->kvm_dirty_gfns = mmap(NULL, s->kvm_dirty_ring_bytes, in kvm_init_vcpu()
605 cpu->kvm_fd, in kvm_init_vcpu()
607 if (cpu->kvm_dirty_gfns == MAP_FAILED) { in kvm_init_vcpu()
608 ret = -errno; in kvm_init_vcpu()
615 error_setg_errno(errp, -ret, in kvm_init_vcpu()
619 cpu->kvm_vcpu_stats_fd = kvm_vcpu_ioctl(cpu, KVM_GET_STATS_FD, NULL); in kvm_init_vcpu()
629 if (!kvm_state || kvm_state->fd == -1) { in kvm_close()
635 close(cpu->kvm_fd); in kvm_close()
636 cpu->kvm_fd = -1; in kvm_close()
637 close(cpu->kvm_vcpu_stats_fd); in kvm_close()
638 cpu->kvm_vcpu_stats_fd = -1; in kvm_close()
641 if (kvm_state && kvm_state->fd != -1) { in kvm_close()
642 close(kvm_state->vmfd); in kvm_close()
643 kvm_state->vmfd = -1; in kvm_close()
644 close(kvm_state->fd); in kvm_close()
645 kvm_state->fd = -1; in kvm_close()
656 bool readonly = mr->readonly || memory_region_is_romd(mr); in kvm_mem_flags()
666 assert(kvm_guest_memfd_supported); in kvm_mem_flags()
676 mem->flags = kvm_mem_flags(mr); in kvm_slot_update_flags()
679 if (mem->flags == mem->old_flags) { in kvm_slot_update_flags()
709 ret = kvm_slot_update_flags(kml, mem, section->mr); in kvm_section_update_flags()
711 size -= slot_size; in kvm_section_update_flags()
756 ram_addr_t start = slot->ram_start_offset; in kvm_slot_sync_dirty_pages()
757 ram_addr_t pages = slot->memory_size / qemu_real_host_page_size(); in kvm_slot_sync_dirty_pages()
759 cpu_physical_memory_set_dirty_lebitmap(slot->dirty_bmap, start, pages); in kvm_slot_sync_dirty_pages()
764 memset(slot->dirty_bmap, 0, slot->dirty_bmap_size); in kvm_slot_reset_dirty_pages()
767 #define ALIGN(x, y) (((x)+(y)-1) & ~((y)-1))
772 if (!(mem->flags & KVM_MEM_LOG_DIRTY_PAGES) || mem->dirty_bmap) { in kvm_slot_init_dirty_bitmap()
779  * bits-per-long. But for the case when the kernel is 64-bit and in kvm_slot_init_dirty_bitmap()
781 * bits-per-long, since sizeof(long) is different between kernel in kvm_slot_init_dirty_bitmap()
787 * a hope that sizeof(long) won't become >8 any time soon. in kvm_slot_init_dirty_bitmap()
790 * And mem->memory_size is aligned to it (otherwise this mem can't in kvm_slot_init_dirty_bitmap()
793 hwaddr bitmap_size = ALIGN(mem->memory_size / qemu_real_host_page_size(), in kvm_slot_init_dirty_bitmap()
795 mem->dirty_bmap = g_malloc0(bitmap_size); in kvm_slot_init_dirty_bitmap()
796 mem->dirty_bmap_size = bitmap_size; in kvm_slot_init_dirty_bitmap()
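
The sizing rule these fragments describe can be illustrated with a small stand-alone sketch (a hypothetical helper, not code from the listed file): one bit per host page, padded up to a multiple of 64 bits so 32-bit and 64-bit userspace compute the same size as the kernel, then converted to bytes.

#include <stdint.h>

#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((uint64_t)(a) - 1))

/* One bit per host page, padded to a 64-bit multiple, returned in bytes.
 * Example: a 1 GiB slot with 4 KiB pages -> 262144 bits -> 32768 bytes. */
static uint64_t dirty_bitmap_bytes(uint64_t slot_size, uint64_t page_size)
{
    uint64_t pages = slot_size / page_size;
    return ALIGN_UP(pages, 64) / 8;
}
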
808 d.dirty_bitmap = slot->dirty_bmap; in kvm_slot_get_dirty_log()
809 d.slot = slot->slot | (slot->as_id << 16); in kvm_slot_get_dirty_log()
812 if (ret == -ENOENT) { in kvm_slot_get_dirty_log()
830 if (as_id >= s->nr_as) { in kvm_dirty_ring_mark_page()
834 kml = s->as[as_id].ml; in kvm_dirty_ring_mark_page()
835 mem = &kml->slots[slot_id]; in kvm_dirty_ring_mark_page()
837 if (!mem->memory_size || offset >= in kvm_dirty_ring_mark_page()
838 (mem->memory_size / qemu_real_host_page_size())) { in kvm_dirty_ring_mark_page()
842 set_bit(offset, mem->dirty_bmap); in kvm_dirty_ring_mark_page()
851 return qatomic_load_acquire(&gfn->flags) == KVM_DIRTY_GFN_F_DIRTY; in dirty_gfn_is_dirtied()
857 * Use a store-release so that the CPU that executes KVM_RESET_DIRTY_RINGS in dirty_gfn_set_collected()
861 * ------------------------------------------------------------------------------ in dirty_gfn_set_collected()
863 * store-rel flags for gfn0 in dirty_gfn_set_collected()
864 * load-acq flags for gfn0 in dirty_gfn_set_collected()
865 * store-rel RESET for gfn0 in dirty_gfn_set_collected()
867 * load-acq flags for gfn0 in dirty_gfn_set_collected()
868 * check if flags have RESET in dirty_gfn_set_collected()
872 qatomic_store_release(&gfn->flags, KVM_DIRTY_GFN_F_RESET); in dirty_gfn_set_collected()
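
A minimal, self-contained model of the collect side of this handshake, using C11 atomics and locally defined names rather than QEMU's qatomic helpers and the kernel headers, could look like the sketch below; it only illustrates the ordering rule described in the comment and is not the actual implementation.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

#define GFN_F_DIRTY (1u << 0)   /* stands in for KVM_DIRTY_GFN_F_DIRTY */
#define GFN_F_RESET (1u << 1)   /* stands in for KVM_DIRTY_GFN_F_RESET */

struct dirty_gfn {              /* simplified stand-in for struct kvm_dirty_gfn */
    _Atomic uint32_t flags;
    uint32_t slot;
    uint64_t offset;
};

static bool collect_one(struct dirty_gfn *gfn, uint32_t *slot, uint64_t *offset)
{
    /* load-acquire pairs with the producer's store-release of GFN_F_DIRTY */
    if (atomic_load_explicit(&gfn->flags, memory_order_acquire) != GFN_F_DIRTY) {
        return false;           /* entry not (yet) marked dirty */
    }
    *slot = gfn->slot;          /* reads ordered after the acquire above */
    *offset = gfn->offset;
    /* store-release: entry is fully consumed before RESET becomes visible */
    atomic_store_explicit(&gfn->flags, GFN_F_RESET, memory_order_release);
    return true;
}
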
881 struct kvm_dirty_gfn *dirty_gfns = cpu->kvm_dirty_gfns, *cur; in kvm_dirty_ring_reap_one()
882 uint32_t ring_size = s->kvm_dirty_ring_size; in kvm_dirty_ring_reap_one()
883 uint32_t count = 0, fetch = cpu->kvm_fetch_index; in kvm_dirty_ring_reap_one()
890 if (!cpu->created) { in kvm_dirty_ring_reap_one()
894 assert(dirty_gfns && ring_size); in kvm_dirty_ring_reap_one()
895 trace_kvm_dirty_ring_reap_vcpu(cpu->cpu_index); in kvm_dirty_ring_reap_one()
902 kvm_dirty_ring_mark_page(s, cur->slot >> 16, cur->slot & 0xffff, in kvm_dirty_ring_reap_one()
903 cur->offset); in kvm_dirty_ring_reap_one()
905 trace_kvm_dirty_ring_page(cpu->cpu_index, fetch, cur->offset); in kvm_dirty_ring_reap_one()
909 cpu->kvm_fetch_index = fetch; in kvm_dirty_ring_reap_one()
910 cpu->dirty_pages += count; in kvm_dirty_ring_reap_one()
934 assert(ret == total); in kvm_dirty_ring_reap_locked()
937 stamp = get_clock() - stamp; in kvm_dirty_ring_reap_locked()
964  * bitmaps before correctly re-protecting those dirtied pages. in kvm_dirty_ring_reap()
967 * reset below. in kvm_dirty_ring_reap()
998 * before calling this function have been put into the per-kvmslot
1011 assert(bql_locked()); in kvm_dirty_ring_flush()
1022 * kvm_physical_sync_dirty_bitmap - Sync dirty bitmap from kernel space
1027  * NOTE: the caller must hold kml->slots_lock. in kvm_physical_sync_dirty_bitmap()
1052 size -= slot_size; in kvm_physical_sync_dirty_bitmap()
1056 /* Alignment requirement for KVM_CLEAR_DIRTY_LOG - 64 pages */
1059 #define KVM_CLEAR_LOG_MASK (-KVM_CLEAR_LOG_ALIGN)
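
As an illustration of the 64-page alignment rule behind these macros, a hypothetical helper (not taken from the listed file) could compute the aligned window that a clear request has to cover:

#include <stdint.h>

struct clear_window {
    uint64_t start;   /* aligned start address, in bytes */
    uint64_t npages;  /* number of pages, a multiple of 64 */
};

static struct clear_window clear_log_window(uint64_t start, uint64_t size,
                                            uint64_t page_size)
{
    uint64_t align = page_size * 64;                         /* 64-page unit */
    uint64_t win_start = start & ~(align - 1);               /* round start down */
    uint64_t span = start - win_start + size;                /* bytes to cover */
    uint64_t win_bytes = (span + align - 1) & ~(align - 1);  /* round size up */

    return (struct clear_window){ win_start, win_bytes / page_size };
}
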
1076 start_delta = start - bmap_start; in kvm_log_clear_one_slot()
1087 end = mem->memory_size / psize; in kvm_log_clear_one_slot()
1088 if (bmap_npages > end - bmap_start) { in kvm_log_clear_one_slot()
1089 bmap_npages = end - bmap_start; in kvm_log_clear_one_slot()
1103 * |<-------- bmap_npages -----------..>| in kvm_log_clear_one_slot()
1106 * |----------------|-------------|------------------|------------| in kvm_log_clear_one_slot()
1115 assert(bmap_start % BITS_PER_LONG == 0); in kvm_log_clear_one_slot()
1117 assert(mem->dirty_bmap); in kvm_log_clear_one_slot()
1118 if (start_delta || bmap_npages - size / psize) { in kvm_log_clear_one_slot()
1119 /* Slow path - we need to manipulate a temp bitmap */ in kvm_log_clear_one_slot()
1121 bitmap_copy_with_src_offset(bmap_clear, mem->dirty_bmap, in kvm_log_clear_one_slot()
1132 * Fast path - both start and size align well with BITS_PER_LONG in kvm_log_clear_one_slot()
1135 d.dirty_bitmap = mem->dirty_bmap + BIT_WORD(bmap_start); in kvm_log_clear_one_slot()
1140 assert(bmap_npages <= UINT32_MAX); in kvm_log_clear_one_slot()
1142 d.slot = mem->slot | (as_id << 16); in kvm_log_clear_one_slot()
1145 if (ret < 0 && ret != -ENOENT) { in kvm_log_clear_one_slot()
1161 bitmap_clear(mem->dirty_bmap, bmap_start + start_delta, in kvm_log_clear_one_slot()
1170 * kvm_physical_log_clear - Clear the kernel's dirty bitmap for range
1172 * NOTE: this will be a no-op if we haven't enabled manual dirty log
1187 if (!s->manual_dirty_log_protect) { in kvm_physical_log_clear()
1192 start = section->offset_within_address_space; in kvm_physical_log_clear()
1193 size = int128_get64(section->size); in kvm_physical_log_clear()
1202 for (i = 0; i < kml->nr_slots_allocated; i++) { in kvm_physical_log_clear()
1203 mem = &kml->slots[i]; in kvm_physical_log_clear()
1205 if (!mem->memory_size || in kvm_physical_log_clear()
1206 mem->start_addr > start + size - 1 || in kvm_physical_log_clear()
1207 start > mem->start_addr + mem->memory_size - 1) { in kvm_physical_log_clear()
1211 if (start >= mem->start_addr) { in kvm_physical_log_clear()
1213 offset = start - mem->start_addr; in kvm_physical_log_clear()
1214 count = MIN(mem->memory_size - offset, size); in kvm_physical_log_clear()
1218 count = MIN(mem->memory_size, size - (mem->start_addr - start)); in kvm_physical_log_clear()
1220 ret = kvm_log_clear_one_slot(mem, kml->as_id, offset, count); in kvm_physical_log_clear()
1237 if (s->coalesced_mmio) { in kvm_coalesce_mmio_region()
1254 if (s->coalesced_mmio) { in kvm_uncoalesce_mmio_region()
1271 if (s->coalesced_pio) { in kvm_coalesce_pio_add()
1288 if (s->coalesced_pio) { in kvm_coalesce_pio_del()
1326 * - replace them on VM reset
1327 * - block a migration for a VM with a poisoned page
1343 qemu_ram_remap(page->ram_addr); in kvm_unpoison_all()
1353 if (page->ram_addr == ram_addr) { in kvm_hwpoison_page_add()
1358 page->ram_addr = ram_addr; in kvm_hwpoison_page_add()
1373 * For example, PPC is always treated as big-endian even if running in adjust_ioeventfd_endianness()
1403 return -ENOSYS; in kvm_set_ioeventfd_mmio()
1416 return -errno; in kvm_set_ioeventfd_mmio()
1435 return -ENOSYS; in kvm_set_ioeventfd_pio()
1454 while (list->name) { in kvm_check_extension_list()
1455 if (!kvm_check_extension(s, list->value)) { in kvm_check_extension_list()
1476 assert((attr & kvm_supported_memory_attributes) == attr); in kvm_set_memory_attributes()
1507 MemoryRegion *mr = section->mr; in kvm_set_phys_mem()
1508 bool writable = !mr->readonly && !mr->rom_device; in kvm_set_phys_mem()
1516 } else if (!mr->romd_mode) { in kvm_set_phys_mem()
1529 mr_offset = section->offset_within_region + start_addr - in kvm_set_phys_mem()
1530 section->offset_within_address_space; in kvm_set_phys_mem()
1543 if (mem->flags & KVM_MEM_LOG_DIRTY_PAGES) { in kvm_set_phys_mem()
1557 if (kvm_state->kvm_dirty_ring_size) { in kvm_set_phys_mem()
1559 if (kvm_state->kvm_dirty_ring_with_bitmap) { in kvm_set_phys_mem()
1570 g_free(mem->dirty_bmap); in kvm_set_phys_mem()
1571 mem->dirty_bmap = NULL; in kvm_set_phys_mem()
1572 mem->memory_size = 0; in kvm_set_phys_mem()
1573 mem->flags = 0; in kvm_set_phys_mem()
1577 __func__, strerror(-err)); in kvm_set_phys_mem()
1581 size -= slot_size; in kvm_set_phys_mem()
1582 kml->nr_slots_used--; in kvm_set_phys_mem()
1591 mem->as_id = kml->as_id; in kvm_set_phys_mem()
1592 mem->memory_size = slot_size; in kvm_set_phys_mem()
1593 mem->start_addr = start_addr; in kvm_set_phys_mem()
1594 mem->ram_start_offset = ram_start_offset; in kvm_set_phys_mem()
1595 mem->ram = ram; in kvm_set_phys_mem()
1596 mem->flags = kvm_mem_flags(mr); in kvm_set_phys_mem()
1597 mem->guest_memfd = mr->ram_block->guest_memfd; in kvm_set_phys_mem()
1598 mem->guest_memfd_offset = (uint8_t*)ram - mr->ram_block->host; in kvm_set_phys_mem()
1604 strerror(-err)); in kvm_set_phys_mem()
1612 __func__, strerror(-err)); in kvm_set_phys_mem()
1620 size -= slot_size; in kvm_set_phys_mem()
1621 kml->nr_slots_used++; in kvm_set_phys_mem()
1628 struct KVMDirtyRingReaper *r = &s->reaper; in kvm_dirty_ring_reaper_thread()
1635 r->reaper_state = KVM_DIRTY_RING_REAPER_WAIT; in kvm_dirty_ring_reaper_thread()
1648 r->reaper_state = KVM_DIRTY_RING_REAPER_REAPING; in kvm_dirty_ring_reaper_thread()
1654 r->reaper_iteration++; in kvm_dirty_ring_reaper_thread()
1662 struct KVMDirtyRingReaper *r = &s->reaper; in kvm_dirty_ring_reaper_init()
1664 qemu_thread_create(&r->reaper_thr, "kvm-reaper", in kvm_dirty_ring_reaper_init()
1671 uint32_t ring_size = s->kvm_dirty_ring_size; in kvm_dirty_ring_init()
1676 s->kvm_dirty_ring_size = 0; in kvm_dirty_ring_init()
1677 s->kvm_dirty_ring_bytes = 0; in kvm_dirty_ring_init()
1703 return -EINVAL; in kvm_dirty_ring_init()
1709 "Suggested minimum value is 1024.", strerror(-ret)); in kvm_dirty_ring_init()
1710 return -EIO; in kvm_dirty_ring_init()
1719 "%s. ", strerror(-ret)); in kvm_dirty_ring_init()
1720 return -EIO; in kvm_dirty_ring_init()
1723 s->kvm_dirty_ring_with_bitmap = true; in kvm_dirty_ring_init()
1726 s->kvm_dirty_ring_size = ring_size; in kvm_dirty_ring_init()
1727 s->kvm_dirty_ring_bytes = ring_bytes; in kvm_dirty_ring_init()
1739 update->section = *section; in kvm_region_add()
1741 QSIMPLEQ_INSERT_TAIL(&kml->transaction_add, update, next); in kvm_region_add()
1751 update->section = *section; in kvm_region_del()
1753 QSIMPLEQ_INSERT_TAIL(&kml->transaction_del, update, next); in kvm_region_del()
1763 if (QSIMPLEQ_EMPTY(&kml->transaction_add) && in kvm_region_commit()
1764 QSIMPLEQ_EMPTY(&kml->transaction_del)) { in kvm_region_commit()
1775 u1 = QSIMPLEQ_FIRST(&kml->transaction_del); in kvm_region_commit()
1776 u2 = QSIMPLEQ_FIRST(&kml->transaction_add); in kvm_region_commit()
1780 range_init_nofail(&r1, u1->section.offset_within_address_space, in kvm_region_commit()
1781 int128_get64(u1->section.size)); in kvm_region_commit()
1782 range_init_nofail(&r2, u2->section.offset_within_address_space, in kvm_region_commit()
1783 int128_get64(u2->section.size)); in kvm_region_commit()
1802 while (!QSIMPLEQ_EMPTY(&kml->transaction_del)) { in kvm_region_commit()
1803 u1 = QSIMPLEQ_FIRST(&kml->transaction_del); in kvm_region_commit()
1804 QSIMPLEQ_REMOVE_HEAD(&kml->transaction_del, next); in kvm_region_commit()
1806 kvm_set_phys_mem(kml, &u1->section, false); in kvm_region_commit()
1807 memory_region_unref(u1->section.mr); in kvm_region_commit()
1811 while (!QSIMPLEQ_EMPTY(&kml->transaction_add)) { in kvm_region_commit()
1812 u1 = QSIMPLEQ_FIRST(&kml->transaction_add); in kvm_region_commit()
1813 QSIMPLEQ_REMOVE_HEAD(&kml->transaction_add, next); in kvm_region_commit()
1815 memory_region_ref(u1->section.mr); in kvm_region_commit()
1816 kvm_set_phys_mem(kml, &u1->section, true); in kvm_region_commit()
1848 for (i = 0; i < kml->nr_slots_allocated; i++) { in kvm_log_sync_global()
1849 mem = &kml->slots[i]; in kvm_log_sync_global()
1850 if (mem->memory_size && mem->flags & KVM_MEM_LOG_DIRTY_PAGES) { in kvm_log_sync_global()
1853 if (s->kvm_dirty_ring_with_bitmap && last_stage && in kvm_log_sync_global()
1879 section->mr->name, section->offset_within_region, in kvm_log_clear()
1880 int128_get64(section->size)); in kvm_log_clear()
1893 r = kvm_set_ioeventfd_mmio(fd, section->offset_within_address_space, in kvm_mem_ioeventfd_add()
1894 data, true, int128_get64(section->size), in kvm_mem_ioeventfd_add()
1898 __func__, strerror(-r), -r); in kvm_mem_ioeventfd_add()
1911 r = kvm_set_ioeventfd_mmio(fd, section->offset_within_address_space, in kvm_mem_ioeventfd_del()
1912 data, false, int128_get64(section->size), in kvm_mem_ioeventfd_del()
1916 __func__, strerror(-r), -r); in kvm_mem_ioeventfd_del()
1929 r = kvm_set_ioeventfd_pio(fd, section->offset_within_address_space, in kvm_io_ioeventfd_add()
1930 data, true, int128_get64(section->size), in kvm_io_ioeventfd_add()
1934 __func__, strerror(-r), -r); in kvm_io_ioeventfd_add()
1948 r = kvm_set_ioeventfd_pio(fd, section->offset_within_address_space, in kvm_io_ioeventfd_del()
1949 data, false, int128_get64(section->size), in kvm_io_ioeventfd_del()
1953 __func__, strerror(-r), -r); in kvm_io_ioeventfd_del()
1963 kml->as_id = as_id; in kvm_memory_listener_register()
1967 QSIMPLEQ_INIT(&kml->transaction_add); in kvm_memory_listener_register()
1968 QSIMPLEQ_INIT(&kml->transaction_del); in kvm_memory_listener_register()
1970 kml->listener.region_add = kvm_region_add; in kvm_memory_listener_register()
1971 kml->listener.region_del = kvm_region_del; in kvm_memory_listener_register()
1972 kml->listener.commit = kvm_region_commit; in kvm_memory_listener_register()
1973 kml->listener.log_start = kvm_log_start; in kvm_memory_listener_register()
1974 kml->listener.log_stop = kvm_log_stop; in kvm_memory_listener_register()
1975 kml->listener.priority = MEMORY_LISTENER_PRIORITY_ACCEL; in kvm_memory_listener_register()
1976 kml->listener.name = name; in kvm_memory_listener_register()
1978 if (s->kvm_dirty_ring_size) { in kvm_memory_listener_register()
1979 kml->listener.log_sync_global = kvm_log_sync_global; in kvm_memory_listener_register()
1981 kml->listener.log_sync = kvm_log_sync; in kvm_memory_listener_register()
1982 kml->listener.log_clear = kvm_log_clear; in kvm_memory_listener_register()
1985 memory_listener_register(&kml->listener, as); in kvm_memory_listener_register()
1987 for (i = 0; i < s->nr_as; ++i) { in kvm_memory_listener_register()
1988 if (!s->as[i].as) { in kvm_memory_listener_register()
1989 s->as[i].as = as; in kvm_memory_listener_register()
1990 s->as[i].ml = kml; in kvm_memory_listener_register()
1997 .name = "kvm-io",
2010 assert(kvm_async_interrupts_enabled()); in kvm_set_irq()
2014 ret = kvm_vm_ioctl(s, s->irq_set_ioctl, &event); in kvm_set_irq()
2020 return (s->irq_set_ioctl == KVM_IRQ_LINE) ? 1 : event.status; in kvm_set_irq()
2031 set_bit(gsi, s->used_gsi_bitmap); in set_gsi()
2036 clear_bit(gsi, s->used_gsi_bitmap); in clear_gsi()
2043 gsi_count = kvm_check_extension(s, KVM_CAP_IRQ_ROUTING) - 1; in kvm_init_irq_routing()
2046 s->used_gsi_bitmap = bitmap_new(gsi_count); in kvm_init_irq_routing()
2047 s->gsi_count = gsi_count; in kvm_init_irq_routing()
2050 s->irq_routes = g_malloc0(sizeof(*s->irq_routes)); in kvm_init_irq_routing()
2051 s->nr_allocated_irq_routes = 0; in kvm_init_irq_routing()
2068 s->irq_routes->flags = 0; in kvm_irqchip_commit_routes()
2070 ret = kvm_vm_ioctl(s, KVM_SET_GSI_ROUTING, s->irq_routes); in kvm_irqchip_commit_routes()
2071 assert(ret == 0); in kvm_irqchip_commit_routes()
2080 if (s->irq_routes->nr == s->nr_allocated_irq_routes) { in kvm_add_routing_entry()
2081 n = s->nr_allocated_irq_routes * 2; in kvm_add_routing_entry()
2087 s->irq_routes = g_realloc(s->irq_routes, size); in kvm_add_routing_entry()
2088 s->nr_allocated_irq_routes = n; in kvm_add_routing_entry()
2090 n = s->irq_routes->nr++; in kvm_add_routing_entry()
2091 new = &s->irq_routes->entries[n]; in kvm_add_routing_entry()
2095 set_gsi(s, entry->gsi); in kvm_add_routing_entry()
2104 for (n = 0; n < s->irq_routes->nr; n++) { in kvm_update_routing_entry()
2105 entry = &s->irq_routes->entries[n]; in kvm_update_routing_entry()
2106 if (entry->gsi != new_entry->gsi) { in kvm_update_routing_entry()
2119 return -ESRCH; in kvm_update_routing_entry()
2122 void kvm_irqchip_add_irq_route(KVMState *s, int irq, int irqchip, int pin) in kvm_irqchip_add_irq_route() argument
2126 assert(pin < s->gsi_count); in kvm_irqchip_add_irq_route()
2132 e.u.irqchip.pin = pin; in kvm_irqchip_add_irq_route()
2145 for (i = 0; i < s->irq_routes->nr; i++) { in kvm_irqchip_release_virq()
2146 e = &s->irq_routes->entries[i]; in kvm_irqchip_release_virq()
2147 if (e->gsi == virq) { in kvm_irqchip_release_virq()
2148 s->irq_routes->nr--; in kvm_irqchip_release_virq()
2149 *e = s->irq_routes->entries[s->irq_routes->nr]; in kvm_irqchip_release_virq()
2177 next_virq = find_first_zero_bit(s->used_gsi_bitmap, s->gsi_count); in kvm_irqchip_get_virq()
2178 if (next_virq >= s->gsi_count) { in kvm_irqchip_get_virq()
2179 return -ENOSPC; in kvm_irqchip_get_virq()
2202 KVMState *s = c->s; in kvm_irqchip_add_msi_route()
2214 return -ENOSYS; in kvm_irqchip_add_msi_route()
2234 return -EINVAL; in kvm_irqchip_add_msi_route()
2237 if (s->irq_routes->nr < s->gsi_count) { in kvm_irqchip_add_msi_route()
2238 trace_kvm_irqchip_add_msi_route(dev ? dev->name : (char *)"N/A", in kvm_irqchip_add_msi_route()
2243 c->changes++; in kvm_irqchip_add_msi_route()
2246 return -ENOSPC; in kvm_irqchip_add_msi_route()
2262 return -ENOSYS; in kvm_irqchip_update_msi_route()
2276 return -EINVAL; in kvm_irqchip_update_msi_route()
2289 int rfd = resample ? event_notifier_get_fd(resample) : -1; in kvm_irqchip_assign_irqfd()
2297 if (rfd != -1) { in kvm_irqchip_assign_irqfd()
2298 assert(assign); in kvm_irqchip_assign_irqfd()
2347 return -ENOSYS; in kvm_irqchip_add_msi_route()
2352 return -ENOSYS; in kvm_irqchip_add_adapter_route()
2357 return -ENOSYS; in kvm_irqchip_add_hv_sint_route()
2369 return -ENOSYS; in kvm_irqchip_update_msi_route()
2389 gboolean found = g_hash_table_lookup_extended(s->gsimap, irq, &key, &gsi); in kvm_irqchip_add_irqfd_notifier()
2392 return -ENXIO; in kvm_irqchip_add_irqfd_notifier()
2401 gboolean found = g_hash_table_lookup_extended(s->gsimap, irq, &key, &gsi); in kvm_irqchip_remove_irqfd_notifier()
2404 return -ENXIO; in kvm_irqchip_remove_irqfd_notifier()
2411 g_hash_table_insert(s->gsimap, irq, GINT_TO_POINTER(gsi)); in kvm_irqchip_set_qemuirq_gsi()
2418 assert(s->kernel_irqchip_split != ON_OFF_AUTO_AUTO); in kvm_irqchip_create()
2424 fprintf(stderr, "Enable kernel irqchip failed: %s\n", strerror(-ret)); in kvm_irqchip_create()
2436 /* First probe and see if there's an arch-specific hook to create the in kvm_irqchip_create()
2437 * in-kernel irqchip for us */ in kvm_irqchip_create()
2440 if (s->kernel_irqchip_split == ON_OFF_AUTO_ON) { in kvm_irqchip_create()
2448 fprintf(stderr, "Create kernel irqchip failed: %s\n", strerror(-ret)); in kvm_irqchip_create()
2453 /* If we have an in-kernel IRQ chip then we must have asynchronous in kvm_irqchip_create()
2461 s->gsimap = g_hash_table_new(g_direct_hash, g_direct_equal); in kvm_irqchip_create()
2494 return kvm_state && kvm_state->kvm_dirty_ring_size; in kvm_dirty_ring_enabled()
2503 return kvm_state->kvm_dirty_ring_size; in kvm_dirty_ring_size()
2512 } while (ret == -EINTR); in do_kvm_create_vm()
2515 error_report("ioctl(KVM_CREATE_VM) failed: %s", strerror(-ret)); in do_kvm_create_vm()
2518 if (ret == -EINVAL) { in do_kvm_create_vm()
2521 error_printf("- for kernels supporting the" in do_kvm_create_vm()
2524 error_printf("- for kernels supporting the vm.allocate_pgste" in do_kvm_create_vm()
2528 if (ret == -EINVAL) { in do_kvm_create_vm()
2538 static int find_kvm_machine_type(MachineState *ms) in find_kvm_machine_type() argument
2540 MachineClass *mc = MACHINE_GET_CLASS(ms); in find_kvm_machine_type()
2543 if (object_property_find(OBJECT(current_machine), "kvm-type")) { in find_kvm_machine_type()
2546 "kvm-type", in find_kvm_machine_type()
2548 type = mc->kvm_type(ms, kvm_type); in find_kvm_machine_type()
2549 } else if (mc->kvm_type) { in find_kvm_machine_type()
2550 type = mc->kvm_type(ms, NULL); in find_kvm_machine_type()
2552 type = kvm_arch_get_default_type(ms); in find_kvm_machine_type()
2574 * page is wr-protected initially, which is against how the kvm dirty ring is in kvm_setup_dirty_ring()
2575 * used - the kvm dirty ring requires all pages to be wr-protected at the very in kvm_setup_dirty_ring()
2579 * we may expect a higher stall time when starting the migration. In the in kvm_setup_dirty_ring()
2581 * instead of clearing dirty bit, it can be a way to explicitly wr-protect in kvm_setup_dirty_ring()
2584 if (!s->kvm_dirty_ring_size) { in kvm_setup_dirty_ring()
2589 s->manual_dirty_log_protect = dirty_log_manual_caps; in kvm_setup_dirty_ring()
2598 s->manual_dirty_log_protect = 0; in kvm_setup_dirty_ring()
2606 static int kvm_init(AccelState *as, MachineState *ms) in kvm_init() argument
2608 MachineClass *mc = MACHINE_GET_CLASS(ms); in kvm_init()
2615 { "SMP", ms->smp.cpus }, in kvm_init()
2616 { "hotpluggable", ms->smp.max_cpus }, in kvm_init()
2633 assert(TARGET_PAGE_SIZE <= qemu_real_host_page_size()); in kvm_init()
2635 s->sigmask_len = 8; in kvm_init()
2639 QTAILQ_INIT(&s->kvm_sw_breakpoints); in kvm_init()
2641 QLIST_INIT(&s->kvm_parked_vcpus); in kvm_init()
2642 s->fd = qemu_open_old(s->device ?: "/dev/kvm", O_RDWR); in kvm_init()
2643 if (s->fd == -1) { in kvm_init()
2645 ret = -errno; in kvm_init()
2652 ret = -EINVAL; in kvm_init()
2659 ret = -EINVAL; in kvm_init()
2665 s->nr_slots_max = kvm_check_extension(s, KVM_CAP_NR_MEMSLOTS); in kvm_init()
2668 if (!s->nr_slots_max) { in kvm_init()
2669 s->nr_slots_max = KVM_MEMSLOTS_NR_MAX_DEFAULT; in kvm_init()
2672 type = find_kvm_machine_type(ms); in kvm_init()
2674 ret = -EINVAL; in kvm_init()
2683 s->vmfd = ret; in kvm_init()
2685 s->nr_as = kvm_vm_check_extension(s, KVM_CAP_MULTI_ADDRESS_SPACE); in kvm_init()
2686 if (s->nr_as <= 1) { in kvm_init()
2687 s->nr_as = 1; in kvm_init()
2689 s->as = g_new0(struct KVMAs, s->nr_as); in kvm_init()
2695 while (nc->name) { in kvm_init()
2696 if (nc->num > soft_vcpus_limit) { in kvm_init()
2699 nc->name, nc->num, soft_vcpus_limit); in kvm_init()
2701 if (nc->num > hard_vcpus_limit) { in kvm_init()
2704 nc->name, nc->num, hard_vcpus_limit); in kvm_init()
2717 ret = -EINVAL; in kvm_init()
2718 error_report("kvm does not support %s", missing_cap->name); in kvm_init()
2723 s->coalesced_mmio = kvm_check_extension(s, KVM_CAP_COALESCED_MMIO); in kvm_init()
2724 s->coalesced_pio = s->coalesced_mmio && in kvm_init()
2733 s->vcpu_events = kvm_check_extension(s, KVM_CAP_VCPU_EVENTS); in kvm_init()
2735 s->max_nested_state_len = kvm_check_extension(s, KVM_CAP_NESTED_STATE); in kvm_init()
2737 s->irq_set_ioctl = KVM_IRQ_LINE; in kvm_init()
2739 s->irq_set_ioctl = KVM_IRQ_LINE_STATUS; in kvm_init()
2772 ret = kvm_arch_init(ms, s); in kvm_init()
2784 if (s->kernel_irqchip_split == ON_OFF_AUTO_AUTO) { in kvm_init()
2785 … s->kernel_irqchip_split = mc->default_kernel_irqchip_split ? ON_OFF_AUTO_ON : ON_OFF_AUTO_OFF; in kvm_init()
2790 if (s->kernel_irqchip_allowed) { in kvm_init()
2794 s->memory_listener.listener.eventfd_add = kvm_mem_ioeventfd_add; in kvm_init()
2795 s->memory_listener.listener.eventfd_del = kvm_mem_ioeventfd_del; in kvm_init()
2796 s->memory_listener.listener.coalesced_io_add = kvm_coalesce_mmio_region; in kvm_init()
2797 s->memory_listener.listener.coalesced_io_del = kvm_uncoalesce_mmio_region; in kvm_init()
2799 kvm_memory_listener_register(s, &s->memory_listener, in kvm_init()
2800 &address_space_memory, 0, "kvm-memory"); in kvm_init()
2804 s->sync_mmu = !!kvm_vm_check_extension(kvm_state, KVM_CAP_SYNC_MMU); in kvm_init()
2805 if (!s->sync_mmu) { in kvm_init()
2807 assert(!ret); in kvm_init()
2810 if (s->kvm_dirty_ring_size) { in kvm_init()
2822 assert(ret < 0); in kvm_init()
2823 if (s->vmfd >= 0) { in kvm_init()
2824 close(s->vmfd); in kvm_init()
2826 if (s->fd != -1) { in kvm_init()
2827 close(s->fd); in kvm_init()
2829 g_free(s->as); in kvm_init()
2830 g_free(s->memory_listener.slots); in kvm_init()
2837 s->sigmask_len = sigmask_len; in kvm_set_sigmask_len()
2859 run->internal.suberror); in kvm_handle_internal_error()
2861 for (i = 0; i < run->internal.ndata; ++i) { in kvm_handle_internal_error()
2863 i, (uint64_t)run->internal.data[i]); in kvm_handle_internal_error()
2865 if (run->internal.suberror == KVM_INTERNAL_ERROR_EMULATION) { in kvm_handle_internal_error()
2875 return -1; in kvm_handle_internal_error()
2882 if (!s || s->coalesced_flush_in_progress) { in kvm_flush_coalesced_mmio_buffer()
2886 s->coalesced_flush_in_progress = true; in kvm_flush_coalesced_mmio_buffer()
2888 if (s->coalesced_mmio_ring) { in kvm_flush_coalesced_mmio_buffer()
2889 struct kvm_coalesced_mmio_ring *ring = s->coalesced_mmio_ring; in kvm_flush_coalesced_mmio_buffer()
2890 while (ring->first != ring->last) { in kvm_flush_coalesced_mmio_buffer()
2893 ent = &ring->coalesced_mmio[ring->first]; in kvm_flush_coalesced_mmio_buffer()
2895 if (ent->pio == 1) { in kvm_flush_coalesced_mmio_buffer()
2896 address_space_write(&address_space_io, ent->phys_addr, in kvm_flush_coalesced_mmio_buffer()
2897 MEMTXATTRS_UNSPECIFIED, ent->data, in kvm_flush_coalesced_mmio_buffer()
2898 ent->len); in kvm_flush_coalesced_mmio_buffer()
2900 cpu_physical_memory_write(ent->phys_addr, ent->data, ent->len); in kvm_flush_coalesced_mmio_buffer()
2903 ring->first = (ring->first + 1) % KVM_COALESCED_MMIO_MAX; in kvm_flush_coalesced_mmio_buffer()
2907 s->coalesced_flush_in_progress = false; in kvm_flush_coalesced_mmio_buffer()
2912 if (!cpu->vcpu_dirty && !kvm_state->guest_state_protected) { in do_kvm_cpu_synchronize_state()
2919 error_report("Failed to get registers: %s", strerror(-ret)); in do_kvm_cpu_synchronize_state()
2926 cpu->vcpu_dirty = true; in do_kvm_cpu_synchronize_state()
2932 if (!cpu->vcpu_dirty && !kvm_state->guest_state_protected) { in kvm_cpu_synchronize_state()
2943 error_reportf_err(err, "Restoring registers after reset: "); in do_kvm_cpu_synchronize_post_reset()
2945 error_report("Failed to put registers after reset: %s", in do_kvm_cpu_synchronize_post_reset()
2946 strerror(-ret)); in do_kvm_cpu_synchronize_post_reset()
2952 cpu->vcpu_dirty = false; in do_kvm_cpu_synchronize_post_reset()
2973 strerror(-ret)); in do_kvm_cpu_synchronize_post_init()
2978 cpu->vcpu_dirty = false; in do_kvm_cpu_synchronize_post_init()
2983 if (!kvm_state->guest_state_protected) { in kvm_cpu_synchronize_post_init()
2994 cpu->vcpu_dirty = true; in do_kvm_cpu_synchronize_pre_loadvm()
3010 qatomic_set(&cpu->kvm_run->immediate_exit, 1); in kvm_cpu_kick()
3031 qatomic_set(&cpu->kvm_run->immediate_exit, 0); in kvm_eat_signals()
3032 /* Write kvm_run->immediate_exit before the cpu->exit_request in kvm_eat_signals()
3044 if (r == -1 && !(errno == EAGAIN || errno == EINTR)) { in kvm_eat_signals()
3050 if (r == -1) { in kvm_eat_signals()
3064 int ret = -EINVAL; in kvm_convert_memory()
3081 * Ignore converting non-assigned region to shared. in kvm_convert_memory()
3085 * and vIO-APIC 0xFEC00000 4K page. in kvm_convert_memory()
3127 ret = ram_block_attributes_state_change(RAM_BLOCK_ATTRIBUTES(mr->rdm), in kvm_convert_memory()
3137 if (rb->page_size != qemu_real_host_page_size()) { in kvm_convert_memory()
3140 * pre-allocated and doesn't need to be discarded in kvm_convert_memory()
3156 struct kvm_run *run = cpu->kvm_run; in kvm_cpu_exec()
3162 qatomic_set(&cpu->exit_request, 0); in kvm_cpu_exec()
3172 if (cpu->vcpu_dirty) { in kvm_cpu_exec()
3180 strerror(-ret)); in kvm_cpu_exec()
3182 ret = -1; in kvm_cpu_exec()
3186 cpu->vcpu_dirty = false; in kvm_cpu_exec()
3190 if (qatomic_read(&cpu->exit_request)) { in kvm_cpu_exec()
3194 * instruction emulation. This self-signal will ensure that we in kvm_cpu_exec()
3200 /* Read cpu->exit_request before KVM_RUN reads run->immediate_exit. in kvm_cpu_exec()
3220 if (run_ret == -EINTR || run_ret == -EAGAIN) { in kvm_cpu_exec()
3226 if (!(run_ret == -EFAULT && run->exit_reason == KVM_EXIT_MEMORY_FAULT)) { in kvm_cpu_exec()
3228 strerror(-run_ret)); in kvm_cpu_exec()
3230 if (run_ret == -EBUSY) { in kvm_cpu_exec()
3237 ret = -1; in kvm_cpu_exec()
3242 trace_kvm_run_exit(cpu->cpu_index, run->exit_reason); in kvm_cpu_exec()
3243 switch (run->exit_reason) { in kvm_cpu_exec()
3246 kvm_handle_io(run->io.port, attrs, in kvm_cpu_exec()
3247 (uint8_t *)run + run->io.data_offset, in kvm_cpu_exec()
3248 run->io.direction, in kvm_cpu_exec()
3249 run->io.size, in kvm_cpu_exec()
3250 run->io.count); in kvm_cpu_exec()
3256 run->mmio.phys_addr, attrs, in kvm_cpu_exec()
3257 run->mmio.data, in kvm_cpu_exec()
3258 run->mmio.len, in kvm_cpu_exec()
3259 run->mmio.is_write); in kvm_cpu_exec()
3271 (uint64_t)run->hw.hardware_exit_reason); in kvm_cpu_exec()
3272 ret = -1; in kvm_cpu_exec()
3282 trace_kvm_dirty_ring_full(cpu->cpu_index); in kvm_cpu_exec()
3288 * the miss of sleep, so just reap the ring-full vCPU. in kvm_cpu_exec()
3300 trace_kvm_run_exit_system_event(cpu->cpu_index, run->system_event.type); in kvm_cpu_exec()
3301 switch (run->system_event.type) { in kvm_cpu_exec()
3323 trace_kvm_memory_fault(run->memory_fault.gpa, in kvm_cpu_exec()
3324 run->memory_fault.size, in kvm_cpu_exec()
3325 run->memory_fault.flags); in kvm_cpu_exec()
3326 if (run->memory_fault.flags & ~KVM_MEMORY_EXIT_FLAG_PRIVATE) { in kvm_cpu_exec()
3328 (uint64_t)run->memory_fault.flags); in kvm_cpu_exec()
3329 ret = -1; in kvm_cpu_exec()
3332 ret = kvm_convert_memory(run->memory_fault.gpa, run->memory_fault.size, in kvm_cpu_exec()
3333 run->memory_fault.flags & KVM_MEMORY_EXIT_FLAG_PRIVATE); in kvm_cpu_exec()
3349 qatomic_set(&cpu->exit_request, 0); in kvm_cpu_exec()
3364 ret = ioctl(s->fd, type, arg); in kvm_ioctl()
3365 if (ret == -1) { in kvm_ioctl()
3366 ret = -errno; in kvm_ioctl()
3383 ret = ioctl(s->vmfd, type, arg); in kvm_vm_ioctl()
3384 if (ret == -1) { in kvm_vm_ioctl()
3385 ret = -errno; in kvm_vm_ioctl()
3401 trace_kvm_vcpu_ioctl(cpu->cpu_index, type, arg); in kvm_vcpu_ioctl()
3403 ret = ioctl(cpu->kvm_fd, type, arg); in kvm_vcpu_ioctl()
3405 if (ret == -1) { in kvm_vcpu_ioctl()
3406 ret = -errno; in kvm_vcpu_ioctl()
3424 if (ret == -1) { in kvm_device_ioctl()
3425 ret = -errno; in kvm_device_ioctl()
3474 error_setg_errno(errp, -err, in kvm_device_access()
3484 return kvm_state->sync_mmu; in kvm_has_sync_mmu()
3489 return kvm_state->vcpu_events; in kvm_has_vcpu_events()
3494 return kvm_state->max_nested_state_len; in kvm_max_nested_state_length()
3516 QTAILQ_FOREACH(bp, &cpu->kvm_state->kvm_sw_breakpoints, entry) { in kvm_find_sw_breakpoint()
3517 if (bp->pc == pc) { in kvm_find_sw_breakpoint()
3526 return !QTAILQ_EMPTY(&cpu->kvm_state->kvm_sw_breakpoints); in kvm_sw_breakpoints_active()
3539 dbg_data->err = kvm_vcpu_ioctl(cpu, KVM_SET_GUEST_DEBUG, in kvm_invoke_set_guest_debug()
3540 &dbg_data->dbg); in kvm_invoke_set_guest_debug()
3549 if (cpu->singlestep_enabled) { in kvm_update_guest_debug()
3552 if (cpu->singlestep_enabled & SSTEP_NOIRQ) { in kvm_update_guest_debug()
3577 bp->use_count++; in kvm_insert_breakpoint()
3582 bp->pc = addr; in kvm_insert_breakpoint()
3583 bp->use_count = 1; in kvm_insert_breakpoint()
3590 QTAILQ_INSERT_HEAD(&cpu->kvm_state->kvm_sw_breakpoints, bp, entry); in kvm_insert_breakpoint()
3615 return -ENOENT; in kvm_remove_breakpoint()
3618 if (bp->use_count > 1) { in kvm_remove_breakpoint()
3619 bp->use_count--; in kvm_remove_breakpoint()
3628 QTAILQ_REMOVE(&cpu->kvm_state->kvm_sw_breakpoints, bp, entry); in kvm_remove_breakpoint()
3649 KVMState *s = cpu->kvm_state; in kvm_remove_all_breakpoints()
3652 QTAILQ_FOREACH_SAFE(bp, &s->kvm_sw_breakpoints, entry, next) { in kvm_remove_all_breakpoints()
3661 QTAILQ_REMOVE(&s->kvm_sw_breakpoints, bp, entry); in kvm_remove_all_breakpoints()
3681 sigmask->len = s->sigmask_len; in kvm_set_signal_mask()
3682 memcpy(sigmask->sigset, sigset, sizeof(*sigset)); in kvm_set_signal_mask()
3692 assert(kvm_immediate_exit); in kvm_ipi_signal()
3719 fprintf(stderr, "kvm_set_signal_mask: %s\n", strerror(-r)); in kvm_init_cpu_signals()
3734 qatomic_set(&cpu->exit_request, 1); in kvm_on_sigbus_vcpu()
3749 assert(code != BUS_MCEERR_AR); in kvm_on_sigbus()
3763 create_dev.fd = -1; in kvm_create_device()
3767 return -ENOTSUP; in kvm_create_device()
3782 .fd = -1, in kvm_device_supported()
3802 trace_kvm_failed_reg_set(id, strerror(-r)); in kvm_set_one_reg()
3816 trace_kvm_failed_reg_get(id, strerror(-r)); in kvm_get_one_reg()
3827 for (i = 0; i < kvm->nr_as; ++i) { in kvm_accel_has_memory()
3828 if (kvm->as[i].as == as && kvm->as[i].ml) { in kvm_accel_has_memory()
3830 return NULL != kvm_lookup_matching_slot(kvm->as[i].ml, in kvm_accel_has_memory()
3843 int64_t value = s->kvm_shadow_mem; in kvm_get_kvm_shadow_mem()
3855 if (s->fd != -1) { in kvm_set_kvm_shadow_mem()
3864 s->kvm_shadow_mem = value; in kvm_set_kvm_shadow_mem()
3874 if (s->fd != -1) { in kvm_set_kernel_irqchip()
3884 s->kernel_irqchip_allowed = true; in kvm_set_kernel_irqchip()
3885 s->kernel_irqchip_required = true; in kvm_set_kernel_irqchip()
3886 s->kernel_irqchip_split = ON_OFF_AUTO_OFF; in kvm_set_kernel_irqchip()
3889 s->kernel_irqchip_allowed = false; in kvm_set_kernel_irqchip()
3890 s->kernel_irqchip_required = false; in kvm_set_kernel_irqchip()
3891 s->kernel_irqchip_split = ON_OFF_AUTO_OFF; in kvm_set_kernel_irqchip()
3894 s->kernel_irqchip_allowed = true; in kvm_set_kernel_irqchip()
3895 s->kernel_irqchip_required = true; in kvm_set_kernel_irqchip()
3896 s->kernel_irqchip_split = ON_OFF_AUTO_ON; in kvm_set_kernel_irqchip()
3908 return kvm_state->kernel_irqchip_allowed; in kvm_kernel_irqchip_allowed()
3913 return kvm_state->kernel_irqchip_required; in kvm_kernel_irqchip_required()
3918 return kvm_state->kernel_irqchip_split == ON_OFF_AUTO_ON; in kvm_kernel_irqchip_split()
3926 uint32_t value = s->kvm_dirty_ring_size; in kvm_get_dirty_ring_size()
3938 if (s->fd != -1) { in kvm_set_dirty_ring_size()
3946 if (value & (value - 1)) { in kvm_set_dirty_ring_size()
3947 error_setg(errp, "dirty-ring-size must be a power of two."); in kvm_set_dirty_ring_size()
3951 s->kvm_dirty_ring_size = value; in kvm_set_dirty_ring_size()
3959 return g_strdup(s->device); in kvm_get_device()
3968 g_free(s->device); in kvm_set_device()
3969 s->device = g_strdup(value); in kvm_set_device()
3975 s->msr_energy.enable = value; in kvm_set_kvm_rapl()
3983 g_free(s->msr_energy.socket_path); in kvm_set_kvm_rapl_socket_path()
3984 s->msr_energy.socket_path = g_strdup(str); in kvm_set_kvm_rapl_socket_path()
3991 s->fd = -1; in kvm_accel_instance_init()
3992 s->vmfd = -1; in kvm_accel_instance_init()
3993 s->kvm_shadow_mem = -1; in kvm_accel_instance_init()
3994 s->kernel_irqchip_allowed = true; in kvm_accel_instance_init()
3995 s->kernel_irqchip_split = ON_OFF_AUTO_AUTO; in kvm_accel_instance_init()
3997 s->kvm_dirty_ring_size = 0; in kvm_accel_instance_init()
3998 s->kvm_dirty_ring_with_bitmap = false; in kvm_accel_instance_init()
3999 s->kvm_eager_split_size = 0; in kvm_accel_instance_init()
4000 s->notify_vmexit = NOTIFY_VMEXIT_OPTION_RUN; in kvm_accel_instance_init()
4001 s->notify_window = 0; in kvm_accel_instance_init()
4002 s->xen_version = 0; in kvm_accel_instance_init()
4003 s->xen_gnttab_max_frames = 64; in kvm_accel_instance_init()
4004 s->xen_evtchn_max_pirq = 256; in kvm_accel_instance_init()
4005 s->device = NULL; in kvm_accel_instance_init()
4006 s->msr_energy.enable = false; in kvm_accel_instance_init()
4023 ac->name = "KVM"; in kvm_accel_class_init()
4024 ac->init_machine = kvm_init; in kvm_accel_class_init()
4025 ac->has_memory = kvm_accel_has_memory; in kvm_accel_class_init()
4026 ac->allowed = &kvm_allowed; in kvm_accel_class_init()
4027 ac->gdbstub_supported_sstep_flags = kvm_gdbstub_sstep_flags; in kvm_accel_class_init()
4029 object_class_property_add(oc, "kernel-irqchip", "on|off|split", in kvm_accel_class_init()
4032 object_class_property_set_description(oc, "kernel-irqchip", in kvm_accel_class_init()
4033 "Configure KVM in-kernel irqchip"); in kvm_accel_class_init()
4035 object_class_property_add(oc, "kvm-shadow-mem", "int", in kvm_accel_class_init()
4038 object_class_property_set_description(oc, "kvm-shadow-mem", in kvm_accel_class_init()
4041 object_class_property_add(oc, "dirty-ring-size", "uint32", in kvm_accel_class_init()
4044 object_class_property_set_description(oc, "dirty-ring-size", in kvm_accel_class_init()
4057 object_class_property_add_str(oc, "rapl-helper-socket", NULL, in kvm_accel_class_init()
4059 object_class_property_set_description(oc, "rapl-helper-socket", in kvm_accel_class_init()
4099 switch (pdesc->flags & KVM_STATS_TYPE_MASK) { in add_kvmstat_entry()
4110 switch (pdesc->flags & KVM_STATS_UNIT_MASK) { in add_kvmstat_entry()
4121 switch (pdesc->flags & KVM_STATS_BASE_MASK) { in add_kvmstat_entry()
4131 stats->name = g_strdup(pdesc->name); in add_kvmstat_entry()
4132 stats->value = g_new0(StatsValue, 1); in add_kvmstat_entry()
4134 if ((pdesc->flags & KVM_STATS_UNIT_MASK) == KVM_STATS_UNIT_BOOLEAN) { in add_kvmstat_entry()
4135 stats->value->u.boolean = *stats_data; in add_kvmstat_entry()
4136 stats->value->type = QTYPE_QBOOL; in add_kvmstat_entry()
4137 } else if (pdesc->size == 1) { in add_kvmstat_entry()
4138 stats->value->u.scalar = *stats_data; in add_kvmstat_entry()
4139 stats->value->type = QTYPE_QNUM; in add_kvmstat_entry()
4142 for (i = 0; i < pdesc->size; i++) { in add_kvmstat_entry()
4145 stats->value->u.list = val_list; in add_kvmstat_entry()
4146 stats->value->type = QTYPE_QLIST; in add_kvmstat_entry()
4158 schema_entry->value = g_new0(StatsSchemaValue, 1); in add_kvmschema_entry()
4160 switch (pdesc->flags & KVM_STATS_TYPE_MASK) { in add_kvmschema_entry()
4162 schema_entry->value->type = STATS_TYPE_CUMULATIVE; in add_kvmschema_entry()
4165 schema_entry->value->type = STATS_TYPE_INSTANT; in add_kvmschema_entry()
4168 schema_entry->value->type = STATS_TYPE_PEAK; in add_kvmschema_entry()
4171 schema_entry->value->type = STATS_TYPE_LINEAR_HISTOGRAM; in add_kvmschema_entry()
4172 schema_entry->value->bucket_size = pdesc->bucket_size; in add_kvmschema_entry()
4173 schema_entry->value->has_bucket_size = true; in add_kvmschema_entry()
4176 schema_entry->value->type = STATS_TYPE_LOG2_HISTOGRAM; in add_kvmschema_entry()
4182 switch (pdesc->flags & KVM_STATS_UNIT_MASK) { in add_kvmschema_entry()
4186 schema_entry->value->has_unit = true; in add_kvmschema_entry()
4187 schema_entry->value->unit = STATS_UNIT_BOOLEAN; in add_kvmschema_entry()
4190 schema_entry->value->has_unit = true; in add_kvmschema_entry()
4191 schema_entry->value->unit = STATS_UNIT_BYTES; in add_kvmschema_entry()
4194 schema_entry->value->has_unit = true; in add_kvmschema_entry()
4195 schema_entry->value->unit = STATS_UNIT_CYCLES; in add_kvmschema_entry()
4198 schema_entry->value->has_unit = true; in add_kvmschema_entry()
4199 schema_entry->value->unit = STATS_UNIT_SECONDS; in add_kvmschema_entry()
4205 schema_entry->value->exponent = pdesc->exponent; in add_kvmschema_entry()
4206 if (pdesc->exponent) { in add_kvmschema_entry()
4207 switch (pdesc->flags & KVM_STATS_BASE_MASK) { in add_kvmschema_entry()
4209 schema_entry->value->has_base = true; in add_kvmschema_entry()
4210 schema_entry->value->base = 10; in add_kvmschema_entry()
4213 schema_entry->value->has_base = true; in add_kvmschema_entry()
4214 schema_entry->value->base = 2; in add_kvmschema_entry()
4221 schema_entry->value->name = g_strdup(pdesc->name); in add_kvmschema_entry()
4222 schema_entry->next = list; in add_kvmschema_entry()
4225 g_free(schema_entry->value); in add_kvmschema_entry()
4257 if (g_str_equal(descriptors->ident, ident)) { in find_stats_descriptors()
4265 kvm_stats_header = &descriptors->kvm_stats_header; in find_stats_descriptors()
4274 size_desc = sizeof(*kvm_stats_desc) + kvm_stats_header->name_size; in find_stats_descriptors()
4277 kvm_stats_desc = g_malloc0_n(kvm_stats_header->num_desc, size_desc); in find_stats_descriptors()
4279 size_desc * kvm_stats_header->num_desc, in find_stats_descriptors()
4280 kvm_stats_header->desc_offset); in find_stats_descriptors()
4282 if (ret != size_desc * kvm_stats_header->num_desc) { in find_stats_descriptors()
4285 size_desc * kvm_stats_header->num_desc, ret); in find_stats_descriptors()
4290 descriptors->kvm_stats_desc = kvm_stats_desc; in find_stats_descriptors()
4291 descriptors->ident = ident; in find_stats_descriptors()
4315 kvm_stats_header = &descriptors->kvm_stats_header; in query_stats()
4316 kvm_stats_desc = descriptors->kvm_stats_desc; in query_stats()
4317 size_desc = sizeof(*kvm_stats_desc) + kvm_stats_header->name_size; in query_stats()
4320 for (i = 0; i < kvm_stats_header->num_desc; ++i) { in query_stats()
4322 size_data += pdesc->size * sizeof(*stats_data); in query_stats()
4326 ret = pread(stats_fd, stats_data, size_data, kvm_stats_header->data_offset); in query_stats()
4334 for (i = 0; i < kvm_stats_header->num_desc; ++i) { in query_stats()
4339 stats = (void *)stats_data + pdesc->offset; in query_stats()
4340 if (!apply_str_list_filter(pdesc->name, names)) { in query_stats()
4356 cpu->parent_obj.canonical_path, in query_stats()
4380 kvm_stats_header = &descriptors->kvm_stats_header; in query_stats_schema()
4381 kvm_stats_desc = descriptors->kvm_stats_desc; in query_stats_schema()
4382 size_desc = sizeof(*kvm_stats_desc) + kvm_stats_header->name_size; in query_stats_schema()
4385 for (i = 0; i < kvm_stats_header->num_desc; ++i) { in query_stats_schema()
4395 int stats_fd = cpu->kvm_vcpu_stats_fd; in query_stats_vcpu()
4398 if (stats_fd == -1) { in query_stats_vcpu()
4400 error_propagate(kvm_stats_args->errp, local_err); in query_stats_vcpu()
4403 query_stats(kvm_stats_args->result.stats, STATS_TARGET_VCPU, in query_stats_vcpu()
4404 kvm_stats_args->names, stats_fd, cpu, in query_stats_vcpu()
4405 kvm_stats_args->errp); in query_stats_vcpu()
4410 int stats_fd = cpu->kvm_vcpu_stats_fd; in query_stats_schema_vcpu()
4413 if (stats_fd == -1) { in query_stats_schema_vcpu()
4415 error_propagate(kvm_stats_args->errp, local_err); in query_stats_schema_vcpu()
4418 query_stats_schema(kvm_stats_args->result.schema, STATS_TARGET_VCPU, stats_fd, in query_stats_schema_vcpu()
4419 kvm_stats_args->errp); in query_stats_schema_vcpu()
4433 if (stats_fd == -1) { in query_stats_cb()
4448 if (!apply_str_list_filter(cpu->parent_obj.canonical_path, targets)) { in query_stats_cb()
4467 if (stats_fd == -1) { in query_stats_schemas_cb()
4483 kvm_state->guest_state_protected = true; in kvm_mark_guest_state_protected()
4496 return -1; in kvm_create_guest_memfd()
4502 return -1; in kvm_create_guest_memfd()