xref: /openbmc/qemu/accel/kvm/kvm-all.c (revision 350785d41d8bb0b799dd16ea04a7232dc8d6093a)
1 /*
2  * QEMU KVM support
3  *
4  * Copyright IBM, Corp. 2008
5  *           Red Hat, Inc. 2008
6  *
7  * Authors:
8  *  Anthony Liguori   <aliguori@us.ibm.com>
9  *  Glauber Costa     <gcosta@redhat.com>
10  *
11  * This work is licensed under the terms of the GNU GPL, version 2 or later.
12  * See the COPYING file in the top-level directory.
13  *
14  */
15 
16 #include "qemu/osdep.h"
17 #include <sys/ioctl.h>
18 #include <poll.h>
19 
20 #include <linux/kvm.h>
21 
22 #include "qemu/atomic.h"
23 #include "qemu/option.h"
24 #include "qemu/config-file.h"
25 #include "qemu/error-report.h"
26 #include "qapi/error.h"
27 #include "hw/pci/msi.h"
28 #include "hw/pci/msix.h"
29 #include "hw/s390x/adapter.h"
30 #include "gdbstub/enums.h"
31 #include "system/kvm_int.h"
32 #include "system/runstate.h"
33 #include "system/cpus.h"
34 #include "system/accel-blocker.h"
35 #include "qemu/bswap.h"
36 #include "exec/tswap.h"
37 #include "system/memory.h"
38 #include "system/ram_addr.h"
39 #include "qemu/event_notifier.h"
40 #include "qemu/main-loop.h"
41 #include "trace.h"
42 #include "hw/irq.h"
43 #include "qapi/visitor.h"
44 #include "qapi/qapi-types-common.h"
45 #include "qapi/qapi-visit-common.h"
46 #include "system/reset.h"
47 #include "qemu/guest-random.h"
48 #include "system/hw_accel.h"
49 #include "kvm-cpus.h"
50 #include "system/dirtylimit.h"
51 #include "qemu/range.h"
52 
53 #include "hw/boards.h"
54 #include "system/stats.h"
55 
56 /* This check must be after config-host.h is included */
57 #ifdef CONFIG_EVENTFD
58 #include <sys/eventfd.h>
59 #endif
60 
61 #if defined(__i386__) || defined(__x86_64__) || defined(__aarch64__)
62 # define KVM_HAVE_MCE_INJECTION 1
63 #endif
64 
65 
66 /* KVM uses PAGE_SIZE in its definition of KVM_COALESCED_MMIO_MAX. We
67  * need to use the real host PAGE_SIZE, as that's what KVM will use.
68  */
69 #ifdef PAGE_SIZE
70 #undef PAGE_SIZE
71 #endif
72 #define PAGE_SIZE qemu_real_host_page_size()
73 
74 #ifndef KVM_GUESTDBG_BLOCKIRQ
75 #define KVM_GUESTDBG_BLOCKIRQ 0
76 #endif
77 
78 /* Default num of memslots to be allocated when VM starts */
79 #define  KVM_MEMSLOTS_NR_ALLOC_DEFAULT                      16
80 /* Default max allowed memslots if kernel reported nothing */
81 #define  KVM_MEMSLOTS_NR_MAX_DEFAULT                        32
82 
83 struct KVMParkedVcpu {
84     unsigned long vcpu_id;
85     int kvm_fd;
86     QLIST_ENTRY(KVMParkedVcpu) node;
87 };
88 
89 KVMState *kvm_state;
90 bool kvm_kernel_irqchip;
91 bool kvm_split_irqchip;
92 bool kvm_async_interrupts_allowed;
93 bool kvm_halt_in_kernel_allowed;
94 bool kvm_resamplefds_allowed;
95 bool kvm_msi_via_irqfd_allowed;
96 bool kvm_gsi_routing_allowed;
97 bool kvm_gsi_direct_mapping;
98 bool kvm_allowed;
99 bool kvm_readonly_mem_allowed;
100 bool kvm_vm_attributes_allowed;
101 bool kvm_msi_use_devid;
102 bool kvm_pre_fault_memory_supported;
103 static bool kvm_has_guest_debug;
104 static int kvm_sstep_flags;
105 static bool kvm_immediate_exit;
106 static uint64_t kvm_supported_memory_attributes;
107 static bool kvm_guest_memfd_supported;
108 static hwaddr kvm_max_slot_size = ~0;
109 
110 static const KVMCapabilityInfo kvm_required_capabilites[] = {
111     KVM_CAP_INFO(USER_MEMORY),
112     KVM_CAP_INFO(DESTROY_MEMORY_REGION_WORKS),
113     KVM_CAP_INFO(JOIN_MEMORY_REGIONS_WORKS),
114     KVM_CAP_INFO(INTERNAL_ERROR_DATA),
115     KVM_CAP_INFO(IOEVENTFD),
116     KVM_CAP_INFO(IOEVENTFD_ANY_LENGTH),
117     KVM_CAP_LAST_INFO
118 };
119 
120 static NotifierList kvm_irqchip_change_notifiers =
121     NOTIFIER_LIST_INITIALIZER(kvm_irqchip_change_notifiers);
122 
123 struct KVMResampleFd {
124     int gsi;
125     EventNotifier *resample_event;
126     QLIST_ENTRY(KVMResampleFd) node;
127 };
128 typedef struct KVMResampleFd KVMResampleFd;
129 
130 /*
131  * Only used with split irqchip where we need to do the resample fd
132  * kick for the kernel from userspace.
133  */
134 static QLIST_HEAD(, KVMResampleFd) kvm_resample_fd_list =
135     QLIST_HEAD_INITIALIZER(kvm_resample_fd_list);
136 
137 static QemuMutex kml_slots_lock;
138 
139 #define kvm_slots_lock()    qemu_mutex_lock(&kml_slots_lock)
140 #define kvm_slots_unlock()  qemu_mutex_unlock(&kml_slots_lock)
141 
142 static void kvm_slot_init_dirty_bitmap(KVMSlot *mem);
143 
144 static inline void kvm_resample_fd_remove(int gsi)
145 {
146     KVMResampleFd *rfd;
147 
148     QLIST_FOREACH(rfd, &kvm_resample_fd_list, node) {
149         if (rfd->gsi == gsi) {
150             QLIST_REMOVE(rfd, node);
151             g_free(rfd);
152             break;
153         }
154     }
155 }
156 
157 static inline void kvm_resample_fd_insert(int gsi, EventNotifier *event)
158 {
159     KVMResampleFd *rfd = g_new0(KVMResampleFd, 1);
160 
161     rfd->gsi = gsi;
162     rfd->resample_event = event;
163 
164     QLIST_INSERT_HEAD(&kvm_resample_fd_list, rfd, node);
165 }
166 
167 void kvm_resample_fd_notify(int gsi)
168 {
169     KVMResampleFd *rfd;
170 
171     QLIST_FOREACH(rfd, &kvm_resample_fd_list, node) {
172         if (rfd->gsi == gsi) {
173             event_notifier_set(rfd->resample_event);
174             trace_kvm_resample_fd_notify(gsi);
175             return;
176         }
177     }
178 }
179 
180 /**
181  * kvm_slots_grow(): Grow the slots[] array in the KVMMemoryListener
182  *
183  * @kml: The KVMMemoryListener* to grow the slots[] array
184  * @nr_slots_new: The new size of slots[] array
185  *
186  * Returns: True if the array grows larger, false otherwise.
187  */
188 static bool kvm_slots_grow(KVMMemoryListener *kml, unsigned int nr_slots_new)
189 {
190     unsigned int i, cur = kml->nr_slots_allocated;
191     KVMSlot *slots;
192 
193     if (nr_slots_new > kvm_state->nr_slots_max) {
194         nr_slots_new = kvm_state->nr_slots_max;
195     }
196 
197     if (cur >= nr_slots_new) {
198         /* Big enough, no need to grow, or we reached max */
199         return false;
200     }
201 
202     if (cur == 0) {
203         slots = g_new0(KVMSlot, nr_slots_new);
204     } else {
205         assert(kml->slots);
206         slots = g_renew(KVMSlot, kml->slots, nr_slots_new);
207         /*
208          * g_renew() doesn't initialize extended buffers, however kvm
209          * memslots require fields to be zero-initialized. E.g. pointers,
210          * memory_size field, etc.
211          */
212         memset(&slots[cur], 0x0, sizeof(slots[0]) * (nr_slots_new - cur));
213     }
214 
215     for (i = cur; i < nr_slots_new; i++) {
216         slots[i].slot = i;
217     }
218 
219     kml->slots = slots;
220     kml->nr_slots_allocated = nr_slots_new;
221     trace_kvm_slots_grow(cur, nr_slots_new);
222 
223     return true;
224 }
225 
226 static bool kvm_slots_double(KVMMemoryListener *kml)
227 {
228     return kvm_slots_grow(kml, kml->nr_slots_allocated * 2);
229 }
230 
231 unsigned int kvm_get_max_memslots(void)
232 {
233     KVMState *s = KVM_STATE(current_accel());
234 
235     return s->nr_slots_max;
236 }
237 
238 unsigned int kvm_get_free_memslots(void)
239 {
240     unsigned int used_slots = 0;
241     KVMState *s = kvm_state;
242     int i;
243 
244     kvm_slots_lock();
245     for (i = 0; i < s->nr_as; i++) {
246         if (!s->as[i].ml) {
247             continue;
248         }
249         used_slots = MAX(used_slots, s->as[i].ml->nr_slots_used);
250     }
251     kvm_slots_unlock();
252 
253     return s->nr_slots_max - used_slots;
254 }
255 
256 /* Called with KVMMemoryListener.slots_lock held */
257 static KVMSlot *kvm_get_free_slot(KVMMemoryListener *kml)
258 {
259     unsigned int n;
260     int i;
261 
262     for (i = 0; i < kml->nr_slots_allocated; i++) {
263         if (kml->slots[i].memory_size == 0) {
264             return &kml->slots[i];
265         }
266     }
267 
268     /*
269      * If no free slots, try to grow first by doubling.  Cache the old size
270      * here to avoid another round of search: if the grow succeeded, it
271      * means slots[] now must have the existing "n" slots occupied,
272      * followed by one or more free slots starting from slots[n].
273      */
274     n = kml->nr_slots_allocated;
275     if (kvm_slots_double(kml)) {
276         return &kml->slots[n];
277     }
278 
279     return NULL;
280 }
281 
282 /* Called with KVMMemoryListener.slots_lock held */
283 static KVMSlot *kvm_alloc_slot(KVMMemoryListener *kml)
284 {
285     KVMSlot *slot = kvm_get_free_slot(kml);
286 
287     if (slot) {
288         return slot;
289     }
290 
291     fprintf(stderr, "%s: no free slot available\n", __func__);
292     abort();
293 }
294 
295 static KVMSlot *kvm_lookup_matching_slot(KVMMemoryListener *kml,
296                                          hwaddr start_addr,
297                                          hwaddr size)
298 {
299     int i;
300 
301     for (i = 0; i < kml->nr_slots_allocated; i++) {
302         KVMSlot *mem = &kml->slots[i];
303 
304         if (start_addr == mem->start_addr && size == mem->memory_size) {
305             return mem;
306         }
307     }
308 
309     return NULL;
310 }
311 
312 /*
313  * Calculate and align the start address and the size of the section.
314  * Return the size. If the size is 0, the aligned section is empty.
315  */
316 static hwaddr kvm_align_section(MemoryRegionSection *section,
317                                 hwaddr *start)
318 {
319     hwaddr size = int128_get64(section->size);
320     hwaddr delta, aligned;
321 
322     /* kvm works in page size chunks, but the function may be called
323        with sub-page size and unaligned start address. Pad the start
324        address to next and truncate size to previous page boundary. */
325     aligned = ROUND_UP(section->offset_within_address_space,
326                        qemu_real_host_page_size());
327     delta = aligned - section->offset_within_address_space;
328     *start = aligned;
329     if (delta > size) {
330         return 0;
331     }
332 
333     return (size - delta) & qemu_real_host_page_mask();
334 }
335 
336 int kvm_physical_memory_addr_from_host(KVMState *s, void *ram,
337                                        hwaddr *phys_addr)
338 {
339     KVMMemoryListener *kml = &s->memory_listener;
340     int i, ret = 0;
341 
342     kvm_slots_lock();
343     for (i = 0; i < kml->nr_slots_allocated; i++) {
344         KVMSlot *mem = &kml->slots[i];
345 
346         if (ram >= mem->ram && ram < mem->ram + mem->memory_size) {
347             *phys_addr = mem->start_addr + (ram - mem->ram);
348             ret = 1;
349             break;
350         }
351     }
352     kvm_slots_unlock();
353 
354     return ret;
355 }
356 
357 static int kvm_set_user_memory_region(KVMMemoryListener *kml, KVMSlot *slot, bool new)
358 {
359     KVMState *s = kvm_state;
360     struct kvm_userspace_memory_region2 mem;
361     int ret;
362 
363     mem.slot = slot->slot | (kml->as_id << 16);
364     mem.guest_phys_addr = slot->start_addr;
365     mem.userspace_addr = (unsigned long)slot->ram;
366     mem.flags = slot->flags;
367     mem.guest_memfd = slot->guest_memfd;
368     mem.guest_memfd_offset = slot->guest_memfd_offset;
369 
370     if (slot->memory_size && !new && (mem.flags ^ slot->old_flags) & KVM_MEM_READONLY) {
371         /* Set the slot size to 0 before setting the slot to the desired
372          * value. This is needed based on KVM commit 75d61fbc. */
373         mem.memory_size = 0;
374 
375         if (kvm_guest_memfd_supported) {
376             ret = kvm_vm_ioctl(s, KVM_SET_USER_MEMORY_REGION2, &mem);
377         } else {
378             ret = kvm_vm_ioctl(s, KVM_SET_USER_MEMORY_REGION, &mem);
379         }
380         if (ret < 0) {
381             goto err;
382         }
383     }
384     mem.memory_size = slot->memory_size;
385     if (kvm_guest_memfd_supported) {
386         ret = kvm_vm_ioctl(s, KVM_SET_USER_MEMORY_REGION2, &mem);
387     } else {
388         ret = kvm_vm_ioctl(s, KVM_SET_USER_MEMORY_REGION, &mem);
389     }
390     slot->old_flags = mem.flags;
391 err:
392     trace_kvm_set_user_memory(mem.slot >> 16, (uint16_t)mem.slot, mem.flags,
393                               mem.guest_phys_addr, mem.memory_size,
394                               mem.userspace_addr, mem.guest_memfd,
395                               mem.guest_memfd_offset, ret);
396     if (ret < 0) {
397         if (kvm_guest_memfd_supported) {
398                 error_report("%s: KVM_SET_USER_MEMORY_REGION2 failed, slot=%d,"
399                         " start=0x%" PRIx64 ", size=0x%" PRIx64 ","
400                         " flags=0x%" PRIx32 ", guest_memfd=%" PRId32 ","
401                         " guest_memfd_offset=0x%" PRIx64 ": %s",
402                         __func__, mem.slot, slot->start_addr,
403                         (uint64_t)mem.memory_size, mem.flags,
404                         mem.guest_memfd, (uint64_t)mem.guest_memfd_offset,
405                         strerror(errno));
406         } else {
407                 error_report("%s: KVM_SET_USER_MEMORY_REGION failed, slot=%d,"
408                             " start=0x%" PRIx64 ", size=0x%" PRIx64 ": %s",
409                             __func__, mem.slot, slot->start_addr,
410                             (uint64_t)mem.memory_size, strerror(errno));
411         }
412     }
413     return ret;
414 }
415 
416 void kvm_park_vcpu(CPUState *cpu)
417 {
418     struct KVMParkedVcpu *vcpu;
419 
420     trace_kvm_park_vcpu(cpu->cpu_index, kvm_arch_vcpu_id(cpu));
421 
422     vcpu = g_malloc0(sizeof(*vcpu));
423     vcpu->vcpu_id = kvm_arch_vcpu_id(cpu);
424     vcpu->kvm_fd = cpu->kvm_fd;
425     QLIST_INSERT_HEAD(&kvm_state->kvm_parked_vcpus, vcpu, node);
426 }
427 
428 int kvm_unpark_vcpu(KVMState *s, unsigned long vcpu_id)
429 {
430     struct KVMParkedVcpu *cpu;
431     int kvm_fd = -ENOENT;
432 
433     QLIST_FOREACH(cpu, &s->kvm_parked_vcpus, node) {
434         if (cpu->vcpu_id == vcpu_id) {
435             QLIST_REMOVE(cpu, node);
436             kvm_fd = cpu->kvm_fd;
437             g_free(cpu);
438             break;
439         }
440     }
441 
442     trace_kvm_unpark_vcpu(vcpu_id, kvm_fd > 0 ? "unparked" : "!found parked");
443 
444     return kvm_fd;
445 }
446 
447 static void kvm_reset_parked_vcpus(KVMState *s)
448 {
449     struct KVMParkedVcpu *cpu;
450 
451     QLIST_FOREACH(cpu, &s->kvm_parked_vcpus, node) {
452         kvm_arch_reset_parked_vcpu(cpu->vcpu_id, cpu->kvm_fd);
453     }
454 }
455 
456 /**
457  * kvm_create_vcpu - Gets a parked KVM vCPU or creates a KVM vCPU
458  * @cpu: QOM CPUState object for which KVM vCPU has to be fetched/created.
459  *
460  * @returns: 0 when success, errno (<0) when failed.
461  */
462 static int kvm_create_vcpu(CPUState *cpu)
463 {
464     unsigned long vcpu_id = kvm_arch_vcpu_id(cpu);
465     KVMState *s = kvm_state;
466     int kvm_fd;
467 
468     /* check if the KVM vCPU already exist but is parked */
469     kvm_fd = kvm_unpark_vcpu(s, vcpu_id);
470     if (kvm_fd < 0) {
471         /* vCPU not parked: create a new KVM vCPU */
472         kvm_fd = kvm_vm_ioctl(s, KVM_CREATE_VCPU, vcpu_id);
473         if (kvm_fd < 0) {
474             error_report("KVM_CREATE_VCPU IOCTL failed for vCPU %lu", vcpu_id);
475             return kvm_fd;
476         }
477     }
478 
479     cpu->kvm_fd = kvm_fd;
480     cpu->kvm_state = s;
481     if (!s->guest_state_protected) {
482         cpu->vcpu_dirty = true;
483     }
484     cpu->dirty_pages = 0;
485     cpu->throttle_us_per_full = 0;
486 
487     trace_kvm_create_vcpu(cpu->cpu_index, vcpu_id, kvm_fd);
488 
489     return 0;
490 }
491 
492 int kvm_create_and_park_vcpu(CPUState *cpu)
493 {
494     int ret = 0;
495 
496     ret = kvm_create_vcpu(cpu);
497     if (!ret) {
498         kvm_park_vcpu(cpu);
499     }
500 
501     return ret;
502 }
503 
504 static int do_kvm_destroy_vcpu(CPUState *cpu)
505 {
506     KVMState *s = kvm_state;
507     int mmap_size;
508     int ret = 0;
509 
510     trace_kvm_destroy_vcpu(cpu->cpu_index, kvm_arch_vcpu_id(cpu));
511 
512     ret = kvm_arch_destroy_vcpu(cpu);
513     if (ret < 0) {
514         goto err;
515     }
516 
517     mmap_size = kvm_ioctl(s, KVM_GET_VCPU_MMAP_SIZE, 0);
518     if (mmap_size < 0) {
519         ret = mmap_size;
520         trace_kvm_failed_get_vcpu_mmap_size();
521         goto err;
522     }
523 
524     /* If I am the CPU that created coalesced_mmio_ring, then discard it */
525     if (s->coalesced_mmio_ring == (void *)cpu->kvm_run + PAGE_SIZE) {
526         s->coalesced_mmio_ring = NULL;
527     }
528 
529     ret = munmap(cpu->kvm_run, mmap_size);
530     if (ret < 0) {
531         goto err;
532     }
533     cpu->kvm_run = NULL;
534 
535     if (cpu->kvm_dirty_gfns) {
536         ret = munmap(cpu->kvm_dirty_gfns, s->kvm_dirty_ring_bytes);
537         if (ret < 0) {
538             goto err;
539         }
540         cpu->kvm_dirty_gfns = NULL;
541     }
542 
543     kvm_park_vcpu(cpu);
544 err:
545     return ret;
546 }
547 
548 void kvm_destroy_vcpu(CPUState *cpu)
549 {
550     if (do_kvm_destroy_vcpu(cpu) < 0) {
551         error_report("kvm_destroy_vcpu failed");
552         exit(EXIT_FAILURE);
553     }
554 }
555 
556 int kvm_init_vcpu(CPUState *cpu, Error **errp)
557 {
558     KVMState *s = kvm_state;
559     int mmap_size;
560     int ret;
561 
562     trace_kvm_init_vcpu(cpu->cpu_index, kvm_arch_vcpu_id(cpu));
563 
564     ret = kvm_arch_pre_create_vcpu(cpu, errp);
565     if (ret < 0) {
566         goto err;
567     }
568 
569     ret = kvm_create_vcpu(cpu);
570     if (ret < 0) {
571         error_setg_errno(errp, -ret,
572                          "kvm_init_vcpu: kvm_create_vcpu failed (%lu)",
573                          kvm_arch_vcpu_id(cpu));
574         goto err;
575     }
576 
577     mmap_size = kvm_ioctl(s, KVM_GET_VCPU_MMAP_SIZE, 0);
578     if (mmap_size < 0) {
579         ret = mmap_size;
580         error_setg_errno(errp, -mmap_size,
581                          "kvm_init_vcpu: KVM_GET_VCPU_MMAP_SIZE failed");
582         goto err;
583     }
584 
585     cpu->kvm_run = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE, MAP_SHARED,
586                         cpu->kvm_fd, 0);
587     if (cpu->kvm_run == MAP_FAILED) {
588         ret = -errno;
589         error_setg_errno(errp, ret,
590                          "kvm_init_vcpu: mmap'ing vcpu state failed (%lu)",
591                          kvm_arch_vcpu_id(cpu));
592         goto err;
593     }
594 
595     if (s->coalesced_mmio && !s->coalesced_mmio_ring) {
596         s->coalesced_mmio_ring =
597             (void *)cpu->kvm_run + s->coalesced_mmio * PAGE_SIZE;
598     }
599 
600     if (s->kvm_dirty_ring_size) {
601         /* Use MAP_SHARED to share pages with the kernel */
602         cpu->kvm_dirty_gfns = mmap(NULL, s->kvm_dirty_ring_bytes,
603                                    PROT_READ | PROT_WRITE, MAP_SHARED,
604                                    cpu->kvm_fd,
605                                    PAGE_SIZE * KVM_DIRTY_LOG_PAGE_OFFSET);
606         if (cpu->kvm_dirty_gfns == MAP_FAILED) {
607             ret = -errno;
608             goto err;
609         }
610     }
611 
612     ret = kvm_arch_init_vcpu(cpu);
613     if (ret < 0) {
614         error_setg_errno(errp, -ret,
615                          "kvm_init_vcpu: kvm_arch_init_vcpu failed (%lu)",
616                          kvm_arch_vcpu_id(cpu));
617     }
618     cpu->kvm_vcpu_stats_fd = kvm_vcpu_ioctl(cpu, KVM_GET_STATS_FD, NULL);
619 
620 err:
621     return ret;
622 }
623 
624 void kvm_close(void)
625 {
626     CPUState *cpu;
627 
628     if (!kvm_state || kvm_state->fd == -1) {
629         return;
630     }
631 
632     CPU_FOREACH(cpu) {
633         cpu_remove_sync(cpu);
634         close(cpu->kvm_fd);
635         cpu->kvm_fd = -1;
636         close(cpu->kvm_vcpu_stats_fd);
637         cpu->kvm_vcpu_stats_fd = -1;
638     }
639 
640     if (kvm_state && kvm_state->fd != -1) {
641         close(kvm_state->vmfd);
642         kvm_state->vmfd = -1;
643         close(kvm_state->fd);
644         kvm_state->fd = -1;
645     }
646     kvm_state = NULL;
647 }
648 
649 /*
650  * dirty pages logging control
651  */
652 
653 static int kvm_mem_flags(MemoryRegion *mr)
654 {
655     bool readonly = mr->readonly || memory_region_is_romd(mr);
656     int flags = 0;
657 
658     if (memory_region_get_dirty_log_mask(mr) != 0) {
659         flags |= KVM_MEM_LOG_DIRTY_PAGES;
660     }
661     if (readonly && kvm_readonly_mem_allowed) {
662         flags |= KVM_MEM_READONLY;
663     }
664     if (memory_region_has_guest_memfd(mr)) {
665         assert(kvm_guest_memfd_supported);
666         flags |= KVM_MEM_GUEST_MEMFD;
667     }
668     return flags;
669 }
670 
671 /* Called with KVMMemoryListener.slots_lock held */
672 static int kvm_slot_update_flags(KVMMemoryListener *kml, KVMSlot *mem,
673                                  MemoryRegion *mr)
674 {
675     mem->flags = kvm_mem_flags(mr);
676 
677     /* If nothing changed effectively, no need to issue ioctl */
678     if (mem->flags == mem->old_flags) {
679         return 0;
680     }
681 
682     kvm_slot_init_dirty_bitmap(mem);
683     return kvm_set_user_memory_region(kml, mem, false);
684 }
685 
686 static int kvm_section_update_flags(KVMMemoryListener *kml,
687                                     MemoryRegionSection *section)
688 {
689     hwaddr start_addr, size, slot_size;
690     KVMSlot *mem;
691     int ret = 0;
692 
693     size = kvm_align_section(section, &start_addr);
694     if (!size) {
695         return 0;
696     }
697 
698     kvm_slots_lock();
699 
700     while (size && !ret) {
701         slot_size = MIN(kvm_max_slot_size, size);
702         mem = kvm_lookup_matching_slot(kml, start_addr, slot_size);
703         if (!mem) {
704             /* We don't have a slot if we want to trap every access. */
705             goto out;
706         }
707 
708         ret = kvm_slot_update_flags(kml, mem, section->mr);
709         start_addr += slot_size;
710         size -= slot_size;
711     }
712 
713 out:
714     kvm_slots_unlock();
715     return ret;
716 }
717 
718 static void kvm_log_start(MemoryListener *listener,
719                           MemoryRegionSection *section,
720                           int old, int new)
721 {
722     KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);
723     int r;
724 
725     if (old != 0) {
726         return;
727     }
728 
729     r = kvm_section_update_flags(kml, section);
730     if (r < 0) {
731         abort();
732     }
733 }
734 
735 static void kvm_log_stop(MemoryListener *listener,
736                           MemoryRegionSection *section,
737                           int old, int new)
738 {
739     KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);
740     int r;
741 
742     if (new != 0) {
743         return;
744     }
745 
746     r = kvm_section_update_flags(kml, section);
747     if (r < 0) {
748         abort();
749     }
750 }
751 
752 /* get kvm's dirty pages bitmap and update qemu's */
753 static void kvm_slot_sync_dirty_pages(KVMSlot *slot)
754 {
755     ram_addr_t start = slot->ram_start_offset;
756     ram_addr_t pages = slot->memory_size / qemu_real_host_page_size();
757 
758     cpu_physical_memory_set_dirty_lebitmap(slot->dirty_bmap, start, pages);
759 }
760 
761 static void kvm_slot_reset_dirty_pages(KVMSlot *slot)
762 {
763     memset(slot->dirty_bmap, 0, slot->dirty_bmap_size);
764 }
765 
766 #define ALIGN(x, y)  (((x)+(y)-1) & ~((y)-1))
767 
768 /* Allocate the dirty bitmap for a slot  */
769 static void kvm_slot_init_dirty_bitmap(KVMSlot *mem)
770 {
771     if (!(mem->flags & KVM_MEM_LOG_DIRTY_PAGES) || mem->dirty_bmap) {
772         return;
773     }
774 
775     /*
776      * XXX bad kernel interface alert
777      * For dirty bitmap, kernel allocates array of size aligned to
778      * bits-per-long.  But for case when the kernel is 64bits and
779      * the userspace is 32bits, userspace can't align to the same
780      * bits-per-long, since sizeof(long) is different between kernel
781      * and user space.  This way, userspace will provide buffer which
782      * may be 4 bytes less than the kernel will use, resulting in
783      * userspace memory corruption (which is not detectable by valgrind
784      * too, in most cases).
785      * So for now, let's align to 64 instead of HOST_LONG_BITS here, in
786      * a hope that sizeof(long) won't become >8 any time soon.
787      *
788      * Note: the granule of kvm dirty log is qemu_real_host_page_size.
789      * And mem->memory_size is aligned to it (otherwise this mem can't
790      * be registered to KVM).
791      */
792     hwaddr bitmap_size = ALIGN(mem->memory_size / qemu_real_host_page_size(),
793                                         /*HOST_LONG_BITS*/ 64) / 8;
794     mem->dirty_bmap = g_malloc0(bitmap_size);
795     mem->dirty_bmap_size = bitmap_size;
796 }
797 
798 /*
799  * Sync dirty bitmap from kernel to KVMSlot.dirty_bmap, return true if
800  * succeeded, false otherwise
801  */
802 static bool kvm_slot_get_dirty_log(KVMState *s, KVMSlot *slot)
803 {
804     struct kvm_dirty_log d = {};
805     int ret;
806 
807     d.dirty_bitmap = slot->dirty_bmap;
808     d.slot = slot->slot | (slot->as_id << 16);
809     ret = kvm_vm_ioctl(s, KVM_GET_DIRTY_LOG, &d);
810 
811     if (ret == -ENOENT) {
812         /* kernel does not have dirty bitmap in this slot */
813         ret = 0;
814     }
815     if (ret) {
816         error_report_once("%s: KVM_GET_DIRTY_LOG failed with %d",
817                           __func__, ret);
818     }
819     return ret == 0;
820 }
821 
822 /* Should be with all slots_lock held for the address spaces. */
823 static void kvm_dirty_ring_mark_page(KVMState *s, uint32_t as_id,
824                                      uint32_t slot_id, uint64_t offset)
825 {
826     KVMMemoryListener *kml;
827     KVMSlot *mem;
828 
829     if (as_id >= s->nr_as) {
830         return;
831     }
832 
833     kml = s->as[as_id].ml;
834     mem = &kml->slots[slot_id];
835 
836     if (!mem->memory_size || offset >=
837         (mem->memory_size / qemu_real_host_page_size())) {
838         return;
839     }
840 
841     set_bit(offset, mem->dirty_bmap);
842 }
843 
844 static bool dirty_gfn_is_dirtied(struct kvm_dirty_gfn *gfn)
845 {
846     /*
847      * Read the flags before the value.  Pairs with barrier in
848      * KVM's kvm_dirty_ring_push() function.
849      */
850     return qatomic_load_acquire(&gfn->flags) == KVM_DIRTY_GFN_F_DIRTY;
851 }
852 
853 static void dirty_gfn_set_collected(struct kvm_dirty_gfn *gfn)
854 {
855     /*
856      * Use a store-release so that the CPU that executes KVM_RESET_DIRTY_RINGS
857      * sees the full content of the ring:
858      *
859      * CPU0                     CPU1                         CPU2
860      * ------------------------------------------------------------------------------
861      *                                                       fill gfn0
862      *                                                       store-rel flags for gfn0
863      * load-acq flags for gfn0
864      * store-rel RESET for gfn0
865      *                          ioctl(RESET_RINGS)
866      *                            load-acq flags for gfn0
867      *                            check if flags have RESET
868      *
869      * The synchronization goes from CPU2 to CPU0 to CPU1.
870      */
871     qatomic_store_release(&gfn->flags, KVM_DIRTY_GFN_F_RESET);
872 }
873 
874 /*
875  * Should be with all slots_lock held for the address spaces.  It returns the
876  * dirty page we've collected on this dirty ring.
877  */
878 static uint32_t kvm_dirty_ring_reap_one(KVMState *s, CPUState *cpu)
879 {
880     struct kvm_dirty_gfn *dirty_gfns = cpu->kvm_dirty_gfns, *cur;
881     uint32_t ring_size = s->kvm_dirty_ring_size;
882     uint32_t count = 0, fetch = cpu->kvm_fetch_index;
883 
884     /*
885      * It's possible that we race with vcpu creation code where the vcpu is
886      * put onto the vcpus list but not yet initialized the dirty ring
887      * structures.  If so, skip it.
888      */
889     if (!cpu->created) {
890         return 0;
891     }
892 
893     assert(dirty_gfns && ring_size);
894     trace_kvm_dirty_ring_reap_vcpu(cpu->cpu_index);
895 
896     while (true) {
897         cur = &dirty_gfns[fetch % ring_size];
898         if (!dirty_gfn_is_dirtied(cur)) {
899             break;
900         }
901         kvm_dirty_ring_mark_page(s, cur->slot >> 16, cur->slot & 0xffff,
902                                  cur->offset);
903         dirty_gfn_set_collected(cur);
904         trace_kvm_dirty_ring_page(cpu->cpu_index, fetch, cur->offset);
905         fetch++;
906         count++;
907     }
908     cpu->kvm_fetch_index = fetch;
909     cpu->dirty_pages += count;
910 
911     return count;
912 }
913 
914 /* Must be with slots_lock held */
915 static uint64_t kvm_dirty_ring_reap_locked(KVMState *s, CPUState* cpu)
916 {
917     int ret;
918     uint64_t total = 0;
919     int64_t stamp;
920 
921     stamp = get_clock();
922 
923     if (cpu) {
924         total = kvm_dirty_ring_reap_one(s, cpu);
925     } else {
926         CPU_FOREACH(cpu) {
927             total += kvm_dirty_ring_reap_one(s, cpu);
928         }
929     }
930 
931     if (total) {
932         ret = kvm_vm_ioctl(s, KVM_RESET_DIRTY_RINGS);
933         assert(ret == total);
934     }
935 
936     stamp = get_clock() - stamp;
937 
938     if (total) {
939         trace_kvm_dirty_ring_reap(total, stamp / 1000);
940     }
941 
942     return total;
943 }
944 
945 /*
946  * Currently for simplicity, we must hold BQL before calling this.  We can
947  * consider to drop the BQL if we're clear with all the race conditions.
948  */
949 static uint64_t kvm_dirty_ring_reap(KVMState *s, CPUState *cpu)
950 {
951     uint64_t total;
952 
953     /*
954      * We need to lock all kvm slots for all address spaces here,
955      * because:
956      *
957      * (1) We need to mark dirty for dirty bitmaps in multiple slots
958      *     and for tons of pages, so it's better to take the lock here
959      *     once rather than once per page.  And more importantly,
960      *
961      * (2) We must _NOT_ publish dirty bits to the other threads
962      *     (e.g., the migration thread) via the kvm memory slot dirty
963      *     bitmaps before correctly re-protect those dirtied pages.
964      *     Otherwise we can have potential risk of data corruption if
965      *     the page data is read in the other thread before we do
966      *     reset below.
967      */
968     kvm_slots_lock();
969     total = kvm_dirty_ring_reap_locked(s, cpu);
970     kvm_slots_unlock();
971 
972     return total;
973 }
974 
975 static void do_kvm_cpu_synchronize_kick(CPUState *cpu, run_on_cpu_data arg)
976 {
977     /* No need to do anything */
978 }
979 
980 /*
981  * Kick all vcpus out in a synchronized way.  When returned, we
982  * guarantee that every vcpu has been kicked and at least returned to
983  * userspace once.
984  */
985 static void kvm_cpu_synchronize_kick_all(void)
986 {
987     CPUState *cpu;
988 
989     CPU_FOREACH(cpu) {
990         run_on_cpu(cpu, do_kvm_cpu_synchronize_kick, RUN_ON_CPU_NULL);
991     }
992 }
993 
994 /*
995  * Flush all the existing dirty pages to the KVM slot buffers.  When
996  * this call returns, we guarantee that all the touched dirty pages
997  * before calling this function have been put into the per-kvmslot
998  * dirty bitmap.
999  *
1000  * This function must be called with BQL held.
1001  */
1002 static void kvm_dirty_ring_flush(void)
1003 {
1004     trace_kvm_dirty_ring_flush(0);
1005     /*
1006      * The function needs to be serialized.  Since this function
1007      * should always be with BQL held, serialization is guaranteed.
1008      * However, let's be sure of it.
1009      */
1010     assert(bql_locked());
1011     /*
1012      * First make sure to flush the hardware buffers by kicking all
1013      * vcpus out in a synchronous way.
1014      */
1015     kvm_cpu_synchronize_kick_all();
1016     kvm_dirty_ring_reap(kvm_state, NULL);
1017     trace_kvm_dirty_ring_flush(1);
1018 }
1019 
1020 /**
1021  * kvm_physical_sync_dirty_bitmap - Sync dirty bitmap from kernel space
1022  *
1023  * This function will first try to fetch dirty bitmap from the kernel,
1024  * and then updates qemu's dirty bitmap.
1025  *
1026  * NOTE: caller must be with kml->slots_lock held.
1027  *
1028  * @kml: the KVM memory listener object
1029  * @section: the memory section to sync the dirty bitmap with
1030  */
1031 static void kvm_physical_sync_dirty_bitmap(KVMMemoryListener *kml,
1032                                            MemoryRegionSection *section)
1033 {
1034     KVMState *s = kvm_state;
1035     KVMSlot *mem;
1036     hwaddr start_addr, size;
1037     hwaddr slot_size;
1038 
1039     size = kvm_align_section(section, &start_addr);
1040     while (size) {
1041         slot_size = MIN(kvm_max_slot_size, size);
1042         mem = kvm_lookup_matching_slot(kml, start_addr, slot_size);
1043         if (!mem) {
1044             /* We don't have a slot if we want to trap every access. */
1045             return;
1046         }
1047         if (kvm_slot_get_dirty_log(s, mem)) {
1048             kvm_slot_sync_dirty_pages(mem);
1049         }
1050         start_addr += slot_size;
1051         size -= slot_size;
1052     }
1053 }
1054 
1055 /* Alignment requirement for KVM_CLEAR_DIRTY_LOG - 64 pages */
1056 #define KVM_CLEAR_LOG_SHIFT  6
1057 #define KVM_CLEAR_LOG_ALIGN  (qemu_real_host_page_size() << KVM_CLEAR_LOG_SHIFT)
1058 #define KVM_CLEAR_LOG_MASK   (-KVM_CLEAR_LOG_ALIGN)
1059 
1060 static int kvm_log_clear_one_slot(KVMSlot *mem, int as_id, uint64_t start,
1061                                   uint64_t size)
1062 {
1063     KVMState *s = kvm_state;
1064     uint64_t end, bmap_start, start_delta, bmap_npages;
1065     struct kvm_clear_dirty_log d;
1066     unsigned long *bmap_clear = NULL, psize = qemu_real_host_page_size();
1067     int ret;
1068 
1069     /*
1070      * We need to extend either the start or the size or both to
1071      * satisfy the KVM interface requirement.  Firstly, do the start
1072      * page alignment on 64 host pages
1073      */
1074     bmap_start = start & KVM_CLEAR_LOG_MASK;
1075     start_delta = start - bmap_start;
1076     bmap_start /= psize;
1077 
1078     /*
1079      * The kernel interface has restriction on the size too, that either:
1080      *
1081      * (1) the size is 64 host pages aligned (just like the start), or
1082      * (2) the size fills up until the end of the KVM memslot.
1083      */
1084     bmap_npages = DIV_ROUND_UP(size + start_delta, KVM_CLEAR_LOG_ALIGN)
1085         << KVM_CLEAR_LOG_SHIFT;
1086     end = mem->memory_size / psize;
1087     if (bmap_npages > end - bmap_start) {
1088         bmap_npages = end - bmap_start;
1089     }
1090     start_delta /= psize;
1091 
1092     /*
1093      * Prepare the bitmap to clear dirty bits.  Here we must guarantee
1094      * that we won't clear any unknown dirty bits otherwise we might
1095      * accidentally clear some set bits which are not yet synced from
1096      * the kernel into QEMU's bitmap, then we'll lose track of the
1097      * guest modifications upon those pages (which can directly lead
1098      * to guest data loss or panic after migration).
1099      *
1100      * Layout of the KVMSlot.dirty_bmap:
1101      *
1102      *                   |<-------- bmap_npages -----------..>|
1103      *                                                     [1]
1104      *                     start_delta         size
1105      *  |----------------|-------------|------------------|------------|
1106      *  ^                ^             ^                               ^
1107      *  |                |             |                               |
1108      * start          bmap_start     (start)                         end
1109      * of memslot                                             of memslot
1110      *
1111      * [1] bmap_npages can be aligned to either 64 pages or the end of slot
1112      */
1113 
1114     assert(bmap_start % BITS_PER_LONG == 0);
1115     /* We should never do log_clear before log_sync */
1116     assert(mem->dirty_bmap);
1117     if (start_delta || bmap_npages - size / psize) {
1118         /* Slow path - we need to manipulate a temp bitmap */
1119         bmap_clear = bitmap_new(bmap_npages);
1120         bitmap_copy_with_src_offset(bmap_clear, mem->dirty_bmap,
1121                                     bmap_start, start_delta + size / psize);
1122         /*
1123          * We need to fill the holes at start because that was not
1124          * specified by the caller and we extended the bitmap only for
1125          * 64 pages alignment
1126          */
1127         bitmap_clear(bmap_clear, 0, start_delta);
1128         d.dirty_bitmap = bmap_clear;
1129     } else {
1130         /*
1131          * Fast path - both start and size align well with BITS_PER_LONG
1132          * (or the end of memory slot)
1133          */
1134         d.dirty_bitmap = mem->dirty_bmap + BIT_WORD(bmap_start);
1135     }
1136 
1137     d.first_page = bmap_start;
1138     /* It should never overflow.  If it happens, say something */
1139     assert(bmap_npages <= UINT32_MAX);
1140     d.num_pages = bmap_npages;
1141     d.slot = mem->slot | (as_id << 16);
1142 
1143     ret = kvm_vm_ioctl(s, KVM_CLEAR_DIRTY_LOG, &d);
1144     if (ret < 0 && ret != -ENOENT) {
1145         error_report("%s: KVM_CLEAR_DIRTY_LOG failed, slot=%d, "
1146                      "start=0x%"PRIx64", size=0x%"PRIx32", errno=%d",
1147                      __func__, d.slot, (uint64_t)d.first_page,
1148                      (uint32_t)d.num_pages, ret);
1149     } else {
1150         ret = 0;
1151         trace_kvm_clear_dirty_log(d.slot, d.first_page, d.num_pages);
1152     }
1153 
1154     /*
1155      * After we have updated the remote dirty bitmap, we update the
1156      * cached bitmap as well for the memslot, then if another user
1157      * clears the same region we know we shouldn't clear it again on
1158      * the remote otherwise it's data loss as well.
1159      */
1160     bitmap_clear(mem->dirty_bmap, bmap_start + start_delta,
1161                  size / psize);
1162     /* This handles the NULL case well */
1163     g_free(bmap_clear);
1164     return ret;
1165 }
1166 
1167 
1168 /**
1169  * kvm_physical_log_clear - Clear the kernel's dirty bitmap for range
1170  *
1171  * NOTE: this will be a no-op if we haven't enabled manual dirty log
1172  * protection in the host kernel because in that case this operation
1173  * will be done within log_sync().
1174  *
1175  * @kml:     the kvm memory listener
1176  * @section: the memory range to clear dirty bitmap
1177  */
1178 static int kvm_physical_log_clear(KVMMemoryListener *kml,
1179                                   MemoryRegionSection *section)
1180 {
1181     KVMState *s = kvm_state;
1182     uint64_t start, size, offset, count;
1183     KVMSlot *mem;
1184     int ret = 0, i;
1185 
1186     if (!s->manual_dirty_log_protect) {
1187         /* No need to do explicit clear */
1188         return ret;
1189     }
1190 
1191     start = section->offset_within_address_space;
1192     size = int128_get64(section->size);
1193 
1194     if (!size) {
1195         /* Nothing more we can do... */
1196         return ret;
1197     }
1198 
1199     kvm_slots_lock();
1200 
1201     for (i = 0; i < kml->nr_slots_allocated; i++) {
1202         mem = &kml->slots[i];
1203         /* Discard slots that are empty or do not overlap the section */
1204         if (!mem->memory_size ||
1205             mem->start_addr > start + size - 1 ||
1206             start > mem->start_addr + mem->memory_size - 1) {
1207             continue;
1208         }
1209 
1210         if (start >= mem->start_addr) {
1211             /* The slot starts before section or is aligned to it.  */
1212             offset = start - mem->start_addr;
1213             count = MIN(mem->memory_size - offset, size);
1214         } else {
1215             /* The slot starts after section.  */
1216             offset = 0;
1217             count = MIN(mem->memory_size, size - (mem->start_addr - start));
1218         }
1219         ret = kvm_log_clear_one_slot(mem, kml->as_id, offset, count);
1220         if (ret < 0) {
1221             break;
1222         }
1223     }
1224 
1225     kvm_slots_unlock();
1226 
1227     return ret;
1228 }
1229 
1230 static void kvm_coalesce_mmio_region(MemoryListener *listener,
1231                                      MemoryRegionSection *secion,
1232                                      hwaddr start, hwaddr size)
1233 {
1234     KVMState *s = kvm_state;
1235 
1236     if (s->coalesced_mmio) {
1237         struct kvm_coalesced_mmio_zone zone;
1238 
1239         zone.addr = start;
1240         zone.size = size;
1241         zone.pad = 0;
1242 
1243         (void)kvm_vm_ioctl(s, KVM_REGISTER_COALESCED_MMIO, &zone);
1244     }
1245 }
1246 
1247 static void kvm_uncoalesce_mmio_region(MemoryListener *listener,
1248                                        MemoryRegionSection *secion,
1249                                        hwaddr start, hwaddr size)
1250 {
1251     KVMState *s = kvm_state;
1252 
1253     if (s->coalesced_mmio) {
1254         struct kvm_coalesced_mmio_zone zone;
1255 
1256         zone.addr = start;
1257         zone.size = size;
1258         zone.pad = 0;
1259 
1260         (void)kvm_vm_ioctl(s, KVM_UNREGISTER_COALESCED_MMIO, &zone);
1261     }
1262 }
1263 
1264 static void kvm_coalesce_pio_add(MemoryListener *listener,
1265                                 MemoryRegionSection *section,
1266                                 hwaddr start, hwaddr size)
1267 {
1268     KVMState *s = kvm_state;
1269 
1270     if (s->coalesced_pio) {
1271         struct kvm_coalesced_mmio_zone zone;
1272 
1273         zone.addr = start;
1274         zone.size = size;
1275         zone.pio = 1;
1276 
1277         (void)kvm_vm_ioctl(s, KVM_REGISTER_COALESCED_MMIO, &zone);
1278     }
1279 }
1280 
1281 static void kvm_coalesce_pio_del(MemoryListener *listener,
1282                                 MemoryRegionSection *section,
1283                                 hwaddr start, hwaddr size)
1284 {
1285     KVMState *s = kvm_state;
1286 
1287     if (s->coalesced_pio) {
1288         struct kvm_coalesced_mmio_zone zone;
1289 
1290         zone.addr = start;
1291         zone.size = size;
1292         zone.pio = 1;
1293 
1294         (void)kvm_vm_ioctl(s, KVM_UNREGISTER_COALESCED_MMIO, &zone);
1295      }
1296 }
1297 
1298 int kvm_check_extension(KVMState *s, unsigned int extension)
1299 {
1300     int ret;
1301 
1302     ret = kvm_ioctl(s, KVM_CHECK_EXTENSION, extension);
1303     if (ret < 0) {
1304         ret = 0;
1305     }
1306 
1307     return ret;
1308 }
1309 
1310 int kvm_vm_check_extension(KVMState *s, unsigned int extension)
1311 {
1312     int ret;
1313 
1314     ret = kvm_vm_ioctl(s, KVM_CHECK_EXTENSION, extension);
1315     if (ret < 0) {
1316         /* VM wide version not implemented, use global one instead */
1317         ret = kvm_check_extension(s, extension);
1318     }
1319 
1320     return ret;
1321 }
1322 
1323 /*
1324  * We track the poisoned pages to be able to:
1325  * - replace them on VM reset
1326  * - block a migration for a VM with a poisoned page
1327  */
1328 typedef struct HWPoisonPage {
1329     ram_addr_t ram_addr;
1330     QLIST_ENTRY(HWPoisonPage) list;
1331 } HWPoisonPage;
1332 
1333 static QLIST_HEAD(, HWPoisonPage) hwpoison_page_list =
1334     QLIST_HEAD_INITIALIZER(hwpoison_page_list);
1335 
1336 static void kvm_unpoison_all(void *param)
1337 {
1338     HWPoisonPage *page, *next_page;
1339 
1340     QLIST_FOREACH_SAFE(page, &hwpoison_page_list, list, next_page) {
1341         QLIST_REMOVE(page, list);
1342         qemu_ram_remap(page->ram_addr);
1343         g_free(page);
1344     }
1345 }
1346 
1347 void kvm_hwpoison_page_add(ram_addr_t ram_addr)
1348 {
1349     HWPoisonPage *page;
1350 
1351     QLIST_FOREACH(page, &hwpoison_page_list, list) {
1352         if (page->ram_addr == ram_addr) {
1353             return;
1354         }
1355     }
1356     page = g_new(HWPoisonPage, 1);
1357     page->ram_addr = ram_addr;
1358     QLIST_INSERT_HEAD(&hwpoison_page_list, page, list);
1359 }
1360 
1361 bool kvm_hwpoisoned_mem(void)
1362 {
1363     return !QLIST_EMPTY(&hwpoison_page_list);
1364 }
1365 
1366 static uint32_t adjust_ioeventfd_endianness(uint32_t val, uint32_t size)
1367 {
1368     if (target_needs_bswap()) {
1369         /*
1370          * The kernel expects ioeventfd values in HOST_BIG_ENDIAN
1371          * endianness, but the memory core hands them in target endianness.
1372          * For example, PPC is always treated as big-endian even if running
1373          * on KVM and on PPC64LE.  Correct here, swapping back.
1374          */
1375         switch (size) {
1376         case 2:
1377             val = bswap16(val);
1378             break;
1379         case 4:
1380             val = bswap32(val);
1381             break;
1382         }
1383     }
1384     return val;
1385 }
1386 
1387 static int kvm_set_ioeventfd_mmio(int fd, hwaddr addr, uint32_t val,
1388                                   bool assign, uint32_t size, bool datamatch)
1389 {
1390     int ret;
1391     struct kvm_ioeventfd iofd = {
1392         .datamatch = datamatch ? adjust_ioeventfd_endianness(val, size) : 0,
1393         .addr = addr,
1394         .len = size,
1395         .flags = 0,
1396         .fd = fd,
1397     };
1398 
1399     trace_kvm_set_ioeventfd_mmio(fd, (uint64_t)addr, val, assign, size,
1400                                  datamatch);
1401     if (!kvm_enabled()) {
1402         return -ENOSYS;
1403     }
1404 
1405     if (datamatch) {
1406         iofd.flags |= KVM_IOEVENTFD_FLAG_DATAMATCH;
1407     }
1408     if (!assign) {
1409         iofd.flags |= KVM_IOEVENTFD_FLAG_DEASSIGN;
1410     }
1411 
1412     ret = kvm_vm_ioctl(kvm_state, KVM_IOEVENTFD, &iofd);
1413 
1414     if (ret < 0) {
1415         return -errno;
1416     }
1417 
1418     return 0;
1419 }
1420 
1421 static int kvm_set_ioeventfd_pio(int fd, uint16_t addr, uint16_t val,
1422                                  bool assign, uint32_t size, bool datamatch)
1423 {
1424     struct kvm_ioeventfd kick = {
1425         .datamatch = datamatch ? adjust_ioeventfd_endianness(val, size) : 0,
1426         .addr = addr,
1427         .flags = KVM_IOEVENTFD_FLAG_PIO,
1428         .len = size,
1429         .fd = fd,
1430     };
1431     int r;
1432     trace_kvm_set_ioeventfd_pio(fd, addr, val, assign, size, datamatch);
1433     if (!kvm_enabled()) {
1434         return -ENOSYS;
1435     }
1436     if (datamatch) {
1437         kick.flags |= KVM_IOEVENTFD_FLAG_DATAMATCH;
1438     }
1439     if (!assign) {
1440         kick.flags |= KVM_IOEVENTFD_FLAG_DEASSIGN;
1441     }
1442     r = kvm_vm_ioctl(kvm_state, KVM_IOEVENTFD, &kick);
1443     if (r < 0) {
1444         return r;
1445     }
1446     return 0;
1447 }
1448 
1449 
1450 static const KVMCapabilityInfo *
1451 kvm_check_extension_list(KVMState *s, const KVMCapabilityInfo *list)
1452 {
1453     while (list->name) {
1454         if (!kvm_check_extension(s, list->value)) {
1455             return list;
1456         }
1457         list++;
1458     }
1459     return NULL;
1460 }
1461 
1462 void kvm_set_max_memslot_size(hwaddr max_slot_size)
1463 {
1464     g_assert(
1465         ROUND_UP(max_slot_size, qemu_real_host_page_size()) == max_slot_size
1466     );
1467     kvm_max_slot_size = max_slot_size;
1468 }
1469 
1470 static int kvm_set_memory_attributes(hwaddr start, uint64_t size, uint64_t attr)
1471 {
1472     struct kvm_memory_attributes attrs;
1473     int r;
1474 
1475     assert((attr & kvm_supported_memory_attributes) == attr);
1476     attrs.attributes = attr;
1477     attrs.address = start;
1478     attrs.size = size;
1479     attrs.flags = 0;
1480 
1481     r = kvm_vm_ioctl(kvm_state, KVM_SET_MEMORY_ATTRIBUTES, &attrs);
1482     if (r) {
1483         error_report("failed to set memory (0x%" HWADDR_PRIx "+0x%" PRIx64 ") "
1484                      "with attr 0x%" PRIx64 " error '%s'",
1485                      start, size, attr, strerror(errno));
1486     }
1487     return r;
1488 }
1489 
1490 int kvm_set_memory_attributes_private(hwaddr start, uint64_t size)
1491 {
1492     return kvm_set_memory_attributes(start, size, KVM_MEMORY_ATTRIBUTE_PRIVATE);
1493 }
1494 
1495 int kvm_set_memory_attributes_shared(hwaddr start, uint64_t size)
1496 {
1497     return kvm_set_memory_attributes(start, size, 0);
1498 }
1499 
1500 /* Called with KVMMemoryListener.slots_lock held */
1501 static void kvm_set_phys_mem(KVMMemoryListener *kml,
1502                              MemoryRegionSection *section, bool add)
1503 {
1504     KVMSlot *mem;
1505     int err;
1506     MemoryRegion *mr = section->mr;
1507     bool writable = !mr->readonly && !mr->rom_device;
1508     hwaddr start_addr, size, slot_size, mr_offset;
1509     ram_addr_t ram_start_offset;
1510     void *ram;
1511 
1512     if (!memory_region_is_ram(mr)) {
1513         if (writable || !kvm_readonly_mem_allowed) {
1514             return;
1515         } else if (!mr->romd_mode) {
1516             /* If the memory device is not in romd_mode, then we actually want
1517              * to remove the kvm memory slot so all accesses will trap. */
1518             add = false;
1519         }
1520     }
1521 
1522     size = kvm_align_section(section, &start_addr);
1523     if (!size) {
1524         return;
1525     }
1526 
1527     /* The offset of the kvmslot within the memory region */
1528     mr_offset = section->offset_within_region + start_addr -
1529         section->offset_within_address_space;
1530 
1531     /* use aligned delta to align the ram address and offset */
1532     ram = memory_region_get_ram_ptr(mr) + mr_offset;
1533     ram_start_offset = memory_region_get_ram_addr(mr) + mr_offset;
1534 
1535     if (!add) {
1536         do {
1537             slot_size = MIN(kvm_max_slot_size, size);
1538             mem = kvm_lookup_matching_slot(kml, start_addr, slot_size);
1539             if (!mem) {
1540                 return;
1541             }
1542             if (mem->flags & KVM_MEM_LOG_DIRTY_PAGES) {
1543                 /*
1544                  * NOTE: We should be aware of the fact that here we're only
1545                  * doing a best effort to sync dirty bits.  No matter whether
1546                  * we're using dirty log or dirty ring, we ignored two facts:
1547                  *
1548                  * (1) dirty bits can reside in hardware buffers (PML)
1549                  *
1550                  * (2) after we collected dirty bits here, pages can be dirtied
1551                  * again before we do the final KVM_SET_USER_MEMORY_REGION to
1552                  * remove the slot.
1553                  *
1554                  * Not easy.  Let's cross the fingers until it's fixed.
1555                  */
1556                 if (kvm_state->kvm_dirty_ring_size) {
1557                     kvm_dirty_ring_reap_locked(kvm_state, NULL);
1558                     if (kvm_state->kvm_dirty_ring_with_bitmap) {
1559                         kvm_slot_sync_dirty_pages(mem);
1560                         kvm_slot_get_dirty_log(kvm_state, mem);
1561                     }
1562                 } else {
1563                     kvm_slot_get_dirty_log(kvm_state, mem);
1564                 }
1565                 kvm_slot_sync_dirty_pages(mem);
1566             }
1567 
1568             /* unregister the slot */
1569             g_free(mem->dirty_bmap);
1570             mem->dirty_bmap = NULL;
1571             mem->memory_size = 0;
1572             mem->flags = 0;
1573             err = kvm_set_user_memory_region(kml, mem, false);
1574             if (err) {
1575                 fprintf(stderr, "%s: error unregistering slot: %s\n",
1576                         __func__, strerror(-err));
1577                 abort();
1578             }
1579             start_addr += slot_size;
1580             size -= slot_size;
1581             kml->nr_slots_used--;
1582         } while (size);
1583         return;
1584     }
1585 
1586     /* register the new slot */
1587     do {
1588         slot_size = MIN(kvm_max_slot_size, size);
1589         mem = kvm_alloc_slot(kml);
1590         mem->as_id = kml->as_id;
1591         mem->memory_size = slot_size;
1592         mem->start_addr = start_addr;
1593         mem->ram_start_offset = ram_start_offset;
1594         mem->ram = ram;
1595         mem->flags = kvm_mem_flags(mr);
1596         mem->guest_memfd = mr->ram_block->guest_memfd;
1597         mem->guest_memfd_offset = (uint8_t*)ram - mr->ram_block->host;
1598 
1599         kvm_slot_init_dirty_bitmap(mem);
1600         err = kvm_set_user_memory_region(kml, mem, true);
1601         if (err) {
1602             fprintf(stderr, "%s: error registering slot: %s\n", __func__,
1603                     strerror(-err));
1604             abort();
1605         }
1606 
1607         if (memory_region_has_guest_memfd(mr)) {
1608             err = kvm_set_memory_attributes_private(start_addr, slot_size);
1609             if (err) {
1610                 error_report("%s: failed to set memory attribute private: %s",
1611                              __func__, strerror(-err));
1612                 exit(1);
1613             }
1614         }
1615 
1616         start_addr += slot_size;
1617         ram_start_offset += slot_size;
1618         ram += slot_size;
1619         size -= slot_size;
1620         kml->nr_slots_used++;
1621     } while (size);
1622 }
1623 
1624 static void *kvm_dirty_ring_reaper_thread(void *data)
1625 {
1626     KVMState *s = data;
1627     struct KVMDirtyRingReaper *r = &s->reaper;
1628 
1629     rcu_register_thread();
1630 
1631     trace_kvm_dirty_ring_reaper("init");
1632 
1633     while (true) {
1634         r->reaper_state = KVM_DIRTY_RING_REAPER_WAIT;
1635         trace_kvm_dirty_ring_reaper("wait");
1636         /*
1637          * TODO: provide a smarter timeout rather than a constant?
1638          */
1639         sleep(1);
1640 
1641         /* keep sleeping so that dirtylimit not be interfered by reaper */
1642         if (dirtylimit_in_service()) {
1643             continue;
1644         }
1645 
1646         trace_kvm_dirty_ring_reaper("wakeup");
1647         r->reaper_state = KVM_DIRTY_RING_REAPER_REAPING;
1648 
1649         bql_lock();
1650         kvm_dirty_ring_reap(s, NULL);
1651         bql_unlock();
1652 
1653         r->reaper_iteration++;
1654     }
1655 
1656     g_assert_not_reached();
1657 }
1658 
1659 static void kvm_dirty_ring_reaper_init(KVMState *s)
1660 {
1661     struct KVMDirtyRingReaper *r = &s->reaper;
1662 
1663     qemu_thread_create(&r->reaper_thr, "kvm-reaper",
1664                        kvm_dirty_ring_reaper_thread,
1665                        s, QEMU_THREAD_JOINABLE);
1666 }
1667 
1668 static int kvm_dirty_ring_init(KVMState *s)
1669 {
1670     uint32_t ring_size = s->kvm_dirty_ring_size;
1671     uint64_t ring_bytes = ring_size * sizeof(struct kvm_dirty_gfn);
1672     unsigned int capability = KVM_CAP_DIRTY_LOG_RING;
1673     int ret;
1674 
1675     s->kvm_dirty_ring_size = 0;
1676     s->kvm_dirty_ring_bytes = 0;
1677 
1678     /* Bail if the dirty ring size isn't specified */
1679     if (!ring_size) {
1680         return 0;
1681     }
1682 
1683     /*
1684      * Read the max supported pages. Fall back to dirty logging mode
1685      * if the dirty ring isn't supported.
1686      */
1687     ret = kvm_vm_check_extension(s, capability);
1688     if (ret <= 0) {
1689         capability = KVM_CAP_DIRTY_LOG_RING_ACQ_REL;
1690         ret = kvm_vm_check_extension(s, capability);
1691     }
1692 
1693     if (ret <= 0) {
1694         warn_report("KVM dirty ring not available, using bitmap method");
1695         return 0;
1696     }
1697 
1698     if (ring_bytes > ret) {
1699         error_report("KVM dirty ring size %" PRIu32 " too big "
1700                      "(maximum is %ld).  Please use a smaller value.",
1701                      ring_size, (long)ret / sizeof(struct kvm_dirty_gfn));
1702         return -EINVAL;
1703     }
1704 
1705     ret = kvm_vm_enable_cap(s, capability, 0, ring_bytes);
1706     if (ret) {
1707         error_report("Enabling of KVM dirty ring failed: %s. "
1708                      "Suggested minimum value is 1024.", strerror(-ret));
1709         return -EIO;
1710     }
1711 
1712     /* Enable the backup bitmap if it is supported */
1713     ret = kvm_vm_check_extension(s, KVM_CAP_DIRTY_LOG_RING_WITH_BITMAP);
1714     if (ret > 0) {
1715         ret = kvm_vm_enable_cap(s, KVM_CAP_DIRTY_LOG_RING_WITH_BITMAP, 0);
1716         if (ret) {
1717             error_report("Enabling of KVM dirty ring's backup bitmap failed: "
1718                          "%s. ", strerror(-ret));
1719             return -EIO;
1720         }
1721 
1722         s->kvm_dirty_ring_with_bitmap = true;
1723     }
1724 
1725     s->kvm_dirty_ring_size = ring_size;
1726     s->kvm_dirty_ring_bytes = ring_bytes;
1727 
1728     return 0;
1729 }
1730 
1731 static void kvm_region_add(MemoryListener *listener,
1732                            MemoryRegionSection *section)
1733 {
1734     KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);
1735     KVMMemoryUpdate *update;
1736 
1737     update = g_new0(KVMMemoryUpdate, 1);
1738     update->section = *section;
1739 
1740     QSIMPLEQ_INSERT_TAIL(&kml->transaction_add, update, next);
1741 }
1742 
1743 static void kvm_region_del(MemoryListener *listener,
1744                            MemoryRegionSection *section)
1745 {
1746     KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);
1747     KVMMemoryUpdate *update;
1748 
1749     update = g_new0(KVMMemoryUpdate, 1);
1750     update->section = *section;
1751 
1752     QSIMPLEQ_INSERT_TAIL(&kml->transaction_del, update, next);
1753 }
1754 
1755 static void kvm_region_commit(MemoryListener *listener)
1756 {
1757     KVMMemoryListener *kml = container_of(listener, KVMMemoryListener,
1758                                           listener);
1759     KVMMemoryUpdate *u1, *u2;
1760     bool need_inhibit = false;
1761 
1762     if (QSIMPLEQ_EMPTY(&kml->transaction_add) &&
1763         QSIMPLEQ_EMPTY(&kml->transaction_del)) {
1764         return;
1765     }
1766 
1767     /*
1768      * We have to be careful when regions to add overlap with ranges to remove.
1769      * We have to simulate atomic KVM memslot updates by making sure no ioctl()
1770      * is currently active.
1771      *
1772      * The lists are order by addresses, so it's easy to find overlaps.
1773      */
1774     u1 = QSIMPLEQ_FIRST(&kml->transaction_del);
1775     u2 = QSIMPLEQ_FIRST(&kml->transaction_add);
1776     while (u1 && u2) {
1777         Range r1, r2;
1778 
1779         range_init_nofail(&r1, u1->section.offset_within_address_space,
1780                           int128_get64(u1->section.size));
1781         range_init_nofail(&r2, u2->section.offset_within_address_space,
1782                           int128_get64(u2->section.size));
1783 
1784         if (range_overlaps_range(&r1, &r2)) {
1785             need_inhibit = true;
1786             break;
1787         }
1788         if (range_lob(&r1) < range_lob(&r2)) {
1789             u1 = QSIMPLEQ_NEXT(u1, next);
1790         } else {
1791             u2 = QSIMPLEQ_NEXT(u2, next);
1792         }
1793     }
1794 
1795     kvm_slots_lock();
1796     if (need_inhibit) {
1797         accel_ioctl_inhibit_begin();
1798     }
1799 
1800     /* Remove all memslots before adding the new ones. */
1801     while (!QSIMPLEQ_EMPTY(&kml->transaction_del)) {
1802         u1 = QSIMPLEQ_FIRST(&kml->transaction_del);
1803         QSIMPLEQ_REMOVE_HEAD(&kml->transaction_del, next);
1804 
1805         kvm_set_phys_mem(kml, &u1->section, false);
1806         memory_region_unref(u1->section.mr);
1807 
1808         g_free(u1);
1809     }
1810     while (!QSIMPLEQ_EMPTY(&kml->transaction_add)) {
1811         u1 = QSIMPLEQ_FIRST(&kml->transaction_add);
1812         QSIMPLEQ_REMOVE_HEAD(&kml->transaction_add, next);
1813 
1814         memory_region_ref(u1->section.mr);
1815         kvm_set_phys_mem(kml, &u1->section, true);
1816 
1817         g_free(u1);
1818     }
1819 
1820     if (need_inhibit) {
1821         accel_ioctl_inhibit_end();
1822     }
1823     kvm_slots_unlock();
1824 }
1825 
1826 static void kvm_log_sync(MemoryListener *listener,
1827                          MemoryRegionSection *section)
1828 {
1829     KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);
1830 
1831     kvm_slots_lock();
1832     kvm_physical_sync_dirty_bitmap(kml, section);
1833     kvm_slots_unlock();
1834 }
1835 
1836 static void kvm_log_sync_global(MemoryListener *l, bool last_stage)
1837 {
1838     KVMMemoryListener *kml = container_of(l, KVMMemoryListener, listener);
1839     KVMState *s = kvm_state;
1840     KVMSlot *mem;
1841     int i;
1842 
1843     /* Flush all kernel dirty addresses into KVMSlot dirty bitmap */
1844     kvm_dirty_ring_flush();
1845 
1846     kvm_slots_lock();
1847     for (i = 0; i < kml->nr_slots_allocated; i++) {
1848         mem = &kml->slots[i];
1849         if (mem->memory_size && mem->flags & KVM_MEM_LOG_DIRTY_PAGES) {
1850             kvm_slot_sync_dirty_pages(mem);
1851 
1852             if (s->kvm_dirty_ring_with_bitmap && last_stage &&
1853                 kvm_slot_get_dirty_log(s, mem)) {
1854                 kvm_slot_sync_dirty_pages(mem);
1855             }
1856 
1857             /*
1858              * This is not needed by KVM_GET_DIRTY_LOG because the
1859              * ioctl will unconditionally overwrite the whole region.
1860              * However kvm dirty ring has no such side effect.
1861              */
1862             kvm_slot_reset_dirty_pages(mem);
1863         }
1864     }
1865     kvm_slots_unlock();
1866 }
1867 
1868 static void kvm_log_clear(MemoryListener *listener,
1869                           MemoryRegionSection *section)
1870 {
1871     KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);
1872     int r;
1873 
1874     r = kvm_physical_log_clear(kml, section);
1875     if (r < 0) {
1876         error_report_once("%s: kvm log clear failed: mr=%s "
1877                           "offset=%"HWADDR_PRIx" size=%"PRIx64, __func__,
1878                           section->mr->name, section->offset_within_region,
1879                           int128_get64(section->size));
1880         abort();
1881     }
1882 }
1883 
1884 static void kvm_mem_ioeventfd_add(MemoryListener *listener,
1885                                   MemoryRegionSection *section,
1886                                   bool match_data, uint64_t data,
1887                                   EventNotifier *e)
1888 {
1889     int fd = event_notifier_get_fd(e);
1890     int r;
1891 
1892     r = kvm_set_ioeventfd_mmio(fd, section->offset_within_address_space,
1893                                data, true, int128_get64(section->size),
1894                                match_data);
1895     if (r < 0) {
1896         fprintf(stderr, "%s: error adding ioeventfd: %s (%d)\n",
1897                 __func__, strerror(-r), -r);
1898         abort();
1899     }
1900 }
1901 
1902 static void kvm_mem_ioeventfd_del(MemoryListener *listener,
1903                                   MemoryRegionSection *section,
1904                                   bool match_data, uint64_t data,
1905                                   EventNotifier *e)
1906 {
1907     int fd = event_notifier_get_fd(e);
1908     int r;
1909 
1910     r = kvm_set_ioeventfd_mmio(fd, section->offset_within_address_space,
1911                                data, false, int128_get64(section->size),
1912                                match_data);
1913     if (r < 0) {
1914         fprintf(stderr, "%s: error deleting ioeventfd: %s (%d)\n",
1915                 __func__, strerror(-r), -r);
1916         abort();
1917     }
1918 }
1919 
1920 static void kvm_io_ioeventfd_add(MemoryListener *listener,
1921                                  MemoryRegionSection *section,
1922                                  bool match_data, uint64_t data,
1923                                  EventNotifier *e)
1924 {
1925     int fd = event_notifier_get_fd(e);
1926     int r;
1927 
1928     r = kvm_set_ioeventfd_pio(fd, section->offset_within_address_space,
1929                               data, true, int128_get64(section->size),
1930                               match_data);
1931     if (r < 0) {
1932         fprintf(stderr, "%s: error adding ioeventfd: %s (%d)\n",
1933                 __func__, strerror(-r), -r);
1934         abort();
1935     }
1936 }
1937 
1938 static void kvm_io_ioeventfd_del(MemoryListener *listener,
1939                                  MemoryRegionSection *section,
1940                                  bool match_data, uint64_t data,
1941                                  EventNotifier *e)
1942 
1943 {
1944     int fd = event_notifier_get_fd(e);
1945     int r;
1946 
1947     r = kvm_set_ioeventfd_pio(fd, section->offset_within_address_space,
1948                               data, false, int128_get64(section->size),
1949                               match_data);
1950     if (r < 0) {
1951         fprintf(stderr, "%s: error deleting ioeventfd: %s (%d)\n",
1952                 __func__, strerror(-r), -r);
1953         abort();
1954     }
1955 }
1956 
1957 void kvm_memory_listener_register(KVMState *s, KVMMemoryListener *kml,
1958                                   AddressSpace *as, int as_id, const char *name)
1959 {
1960     int i;
1961 
1962     kml->as_id = as_id;
1963 
1964     kvm_slots_grow(kml, KVM_MEMSLOTS_NR_ALLOC_DEFAULT);
1965 
1966     QSIMPLEQ_INIT(&kml->transaction_add);
1967     QSIMPLEQ_INIT(&kml->transaction_del);
1968 
1969     kml->listener.region_add = kvm_region_add;
1970     kml->listener.region_del = kvm_region_del;
1971     kml->listener.commit = kvm_region_commit;
1972     kml->listener.log_start = kvm_log_start;
1973     kml->listener.log_stop = kvm_log_stop;
1974     kml->listener.priority = MEMORY_LISTENER_PRIORITY_ACCEL;
1975     kml->listener.name = name;
1976 
1977     if (s->kvm_dirty_ring_size) {
1978         kml->listener.log_sync_global = kvm_log_sync_global;
1979     } else {
1980         kml->listener.log_sync = kvm_log_sync;
1981         kml->listener.log_clear = kvm_log_clear;
1982     }
1983 
1984     memory_listener_register(&kml->listener, as);
1985 
1986     for (i = 0; i < s->nr_as; ++i) {
1987         if (!s->as[i].as) {
1988             s->as[i].as = as;
1989             s->as[i].ml = kml;
1990             break;
1991         }
1992     }
1993 }
1994 
1995 static MemoryListener kvm_io_listener = {
1996     .name = "kvm-io",
1997     .coalesced_io_add = kvm_coalesce_pio_add,
1998     .coalesced_io_del = kvm_coalesce_pio_del,
1999     .eventfd_add = kvm_io_ioeventfd_add,
2000     .eventfd_del = kvm_io_ioeventfd_del,
2001     .priority = MEMORY_LISTENER_PRIORITY_DEV_BACKEND,
2002 };
2003 
2004 int kvm_set_irq(KVMState *s, int irq, int level)
2005 {
2006     struct kvm_irq_level event;
2007     int ret;
2008 
2009     assert(kvm_async_interrupts_enabled());
2010 
2011     event.level = level;
2012     event.irq = irq;
2013     ret = kvm_vm_ioctl(s, s->irq_set_ioctl, &event);
2014     if (ret < 0) {
2015         perror("kvm_set_irq");
2016         abort();
2017     }
2018 
2019     return (s->irq_set_ioctl == KVM_IRQ_LINE) ? 1 : event.status;
2020 }
2021 
2022 #ifdef KVM_CAP_IRQ_ROUTING
2023 typedef struct KVMMSIRoute {
2024     struct kvm_irq_routing_entry kroute;
2025     QTAILQ_ENTRY(KVMMSIRoute) entry;
2026 } KVMMSIRoute;
2027 
2028 static void set_gsi(KVMState *s, unsigned int gsi)
2029 {
2030     set_bit(gsi, s->used_gsi_bitmap);
2031 }
2032 
2033 static void clear_gsi(KVMState *s, unsigned int gsi)
2034 {
2035     clear_bit(gsi, s->used_gsi_bitmap);
2036 }
2037 
2038 void kvm_init_irq_routing(KVMState *s)
2039 {
2040     int gsi_count;
2041 
2042     gsi_count = kvm_check_extension(s, KVM_CAP_IRQ_ROUTING) - 1;
2043     if (gsi_count > 0) {
2044         /* Round up so we can search ints using ffs */
2045         s->used_gsi_bitmap = bitmap_new(gsi_count);
2046         s->gsi_count = gsi_count;
2047     }
2048 
2049     s->irq_routes = g_malloc0(sizeof(*s->irq_routes));
2050     s->nr_allocated_irq_routes = 0;
2051 
2052     kvm_arch_init_irq_routing(s);
2053 }
2054 
2055 void kvm_irqchip_commit_routes(KVMState *s)
2056 {
2057     int ret;
2058 
2059     if (kvm_gsi_direct_mapping()) {
2060         return;
2061     }
2062 
2063     if (!kvm_gsi_routing_enabled()) {
2064         return;
2065     }
2066 
2067     s->irq_routes->flags = 0;
2068     trace_kvm_irqchip_commit_routes();
2069     ret = kvm_vm_ioctl(s, KVM_SET_GSI_ROUTING, s->irq_routes);
2070     assert(ret == 0);
2071 }
2072 
2073 void kvm_add_routing_entry(KVMState *s,
2074                            struct kvm_irq_routing_entry *entry)
2075 {
2076     struct kvm_irq_routing_entry *new;
2077     int n, size;
2078 
2079     if (s->irq_routes->nr == s->nr_allocated_irq_routes) {
2080         n = s->nr_allocated_irq_routes * 2;
2081         if (n < 64) {
2082             n = 64;
2083         }
2084         size = sizeof(struct kvm_irq_routing);
2085         size += n * sizeof(*new);
2086         s->irq_routes = g_realloc(s->irq_routes, size);
2087         s->nr_allocated_irq_routes = n;
2088     }
2089     n = s->irq_routes->nr++;
2090     new = &s->irq_routes->entries[n];
2091 
2092     *new = *entry;
2093 
2094     set_gsi(s, entry->gsi);
2095 }
2096 
2097 static int kvm_update_routing_entry(KVMState *s,
2098                                     struct kvm_irq_routing_entry *new_entry)
2099 {
2100     struct kvm_irq_routing_entry *entry;
2101     int n;
2102 
2103     for (n = 0; n < s->irq_routes->nr; n++) {
2104         entry = &s->irq_routes->entries[n];
2105         if (entry->gsi != new_entry->gsi) {
2106             continue;
2107         }
2108 
2109         if(!memcmp(entry, new_entry, sizeof *entry)) {
2110             return 0;
2111         }
2112 
2113         *entry = *new_entry;
2114 
2115         return 0;
2116     }
2117 
2118     return -ESRCH;
2119 }
2120 
2121 void kvm_irqchip_add_irq_route(KVMState *s, int irq, int irqchip, int pin)
2122 {
2123     struct kvm_irq_routing_entry e = {};
2124 
2125     assert(pin < s->gsi_count);
2126 
2127     e.gsi = irq;
2128     e.type = KVM_IRQ_ROUTING_IRQCHIP;
2129     e.flags = 0;
2130     e.u.irqchip.irqchip = irqchip;
2131     e.u.irqchip.pin = pin;
2132     kvm_add_routing_entry(s, &e);
2133 }
2134 
2135 void kvm_irqchip_release_virq(KVMState *s, int virq)
2136 {
2137     struct kvm_irq_routing_entry *e;
2138     int i;
2139 
2140     if (kvm_gsi_direct_mapping()) {
2141         return;
2142     }
2143 
2144     for (i = 0; i < s->irq_routes->nr; i++) {
2145         e = &s->irq_routes->entries[i];
2146         if (e->gsi == virq) {
2147             s->irq_routes->nr--;
2148             *e = s->irq_routes->entries[s->irq_routes->nr];
2149         }
2150     }
2151     clear_gsi(s, virq);
2152     kvm_arch_release_virq_post(virq);
2153     trace_kvm_irqchip_release_virq(virq);
2154 }
2155 
2156 void kvm_irqchip_add_change_notifier(Notifier *n)
2157 {
2158     notifier_list_add(&kvm_irqchip_change_notifiers, n);
2159 }
2160 
2161 void kvm_irqchip_remove_change_notifier(Notifier *n)
2162 {
2163     notifier_remove(n);
2164 }
2165 
2166 void kvm_irqchip_change_notify(void)
2167 {
2168     notifier_list_notify(&kvm_irqchip_change_notifiers, NULL);
2169 }
2170 
2171 int kvm_irqchip_get_virq(KVMState *s)
2172 {
2173     int next_virq;
2174 
2175     /* Return the lowest unused GSI in the bitmap */
2176     next_virq = find_first_zero_bit(s->used_gsi_bitmap, s->gsi_count);
2177     if (next_virq >= s->gsi_count) {
2178         return -ENOSPC;
2179     } else {
2180         return next_virq;
2181     }
2182 }
2183 
2184 int kvm_irqchip_send_msi(KVMState *s, MSIMessage msg)
2185 {
2186     struct kvm_msi msi;
2187 
2188     msi.address_lo = (uint32_t)msg.address;
2189     msi.address_hi = msg.address >> 32;
2190     msi.data = le32_to_cpu(msg.data);
2191     msi.flags = 0;
2192     memset(msi.pad, 0, sizeof(msi.pad));
2193 
2194     return kvm_vm_ioctl(s, KVM_SIGNAL_MSI, &msi);
2195 }
2196 
2197 int kvm_irqchip_add_msi_route(KVMRouteChange *c, int vector, PCIDevice *dev)
2198 {
2199     struct kvm_irq_routing_entry kroute = {};
2200     int virq;
2201     KVMState *s = c->s;
2202     MSIMessage msg = {0, 0};
2203 
2204     if (pci_available && dev) {
2205         msg = pci_get_msi_message(dev, vector);
2206     }
2207 
2208     if (kvm_gsi_direct_mapping()) {
2209         return kvm_arch_msi_data_to_gsi(msg.data);
2210     }
2211 
2212     if (!kvm_gsi_routing_enabled()) {
2213         return -ENOSYS;
2214     }
2215 
2216     virq = kvm_irqchip_get_virq(s);
2217     if (virq < 0) {
2218         return virq;
2219     }
2220 
2221     kroute.gsi = virq;
2222     kroute.type = KVM_IRQ_ROUTING_MSI;
2223     kroute.flags = 0;
2224     kroute.u.msi.address_lo = (uint32_t)msg.address;
2225     kroute.u.msi.address_hi = msg.address >> 32;
2226     kroute.u.msi.data = le32_to_cpu(msg.data);
2227     if (pci_available && kvm_msi_devid_required()) {
2228         kroute.flags = KVM_MSI_VALID_DEVID;
2229         kroute.u.msi.devid = pci_requester_id(dev);
2230     }
2231     if (kvm_arch_fixup_msi_route(&kroute, msg.address, msg.data, dev)) {
2232         kvm_irqchip_release_virq(s, virq);
2233         return -EINVAL;
2234     }
2235 
2236     if (s->irq_routes->nr < s->gsi_count) {
2237         trace_kvm_irqchip_add_msi_route(dev ? dev->name : (char *)"N/A",
2238                                         vector, virq);
2239 
2240         kvm_add_routing_entry(s, &kroute);
2241         kvm_arch_add_msi_route_post(&kroute, vector, dev);
2242         c->changes++;
2243     } else {
2244         kvm_irqchip_release_virq(s, virq);
2245         return -ENOSPC;
2246     }
2247 
2248     return virq;
2249 }
2250 
2251 int kvm_irqchip_update_msi_route(KVMState *s, int virq, MSIMessage msg,
2252                                  PCIDevice *dev)
2253 {
2254     struct kvm_irq_routing_entry kroute = {};
2255 
2256     if (kvm_gsi_direct_mapping()) {
2257         return 0;
2258     }
2259 
2260     if (!kvm_irqchip_in_kernel()) {
2261         return -ENOSYS;
2262     }
2263 
2264     kroute.gsi = virq;
2265     kroute.type = KVM_IRQ_ROUTING_MSI;
2266     kroute.flags = 0;
2267     kroute.u.msi.address_lo = (uint32_t)msg.address;
2268     kroute.u.msi.address_hi = msg.address >> 32;
2269     kroute.u.msi.data = le32_to_cpu(msg.data);
2270     if (pci_available && kvm_msi_devid_required()) {
2271         kroute.flags = KVM_MSI_VALID_DEVID;
2272         kroute.u.msi.devid = pci_requester_id(dev);
2273     }
2274     if (kvm_arch_fixup_msi_route(&kroute, msg.address, msg.data, dev)) {
2275         return -EINVAL;
2276     }
2277 
2278     trace_kvm_irqchip_update_msi_route(virq);
2279 
2280     return kvm_update_routing_entry(s, &kroute);
2281 }
2282 
2283 static int kvm_irqchip_assign_irqfd(KVMState *s, EventNotifier *event,
2284                                     EventNotifier *resample, int virq,
2285                                     bool assign)
2286 {
2287     int fd = event_notifier_get_fd(event);
2288     int rfd = resample ? event_notifier_get_fd(resample) : -1;
2289 
2290     struct kvm_irqfd irqfd = {
2291         .fd = fd,
2292         .gsi = virq,
2293         .flags = assign ? 0 : KVM_IRQFD_FLAG_DEASSIGN,
2294     };
2295 
2296     if (rfd != -1) {
2297         assert(assign);
2298         if (kvm_irqchip_is_split()) {
2299             /*
2300              * When the slow irqchip (e.g. IOAPIC) is in the
2301              * userspace, KVM kernel resamplefd will not work because
2302              * the EOI of the interrupt will be delivered to userspace
2303              * instead, so the KVM kernel resamplefd kick will be
2304              * skipped.  The userspace here mimics what the kernel
2305              * provides with resamplefd, remember the resamplefd and
2306              * kick it when we receive EOI of this IRQ.
2307              *
2308              * This is hackery because IOAPIC is mostly bypassed
2309              * (except EOI broadcasts) when irqfd is used.  However
2310              * this can bring much performance back for split irqchip
2311              * with INTx IRQs (for VFIO, this gives 93% perf of the
2312              * full fast path, which is 46% perf boost comparing to
2313              * the INTx slow path).
2314              */
2315             kvm_resample_fd_insert(virq, resample);
2316         } else {
2317             irqfd.flags |= KVM_IRQFD_FLAG_RESAMPLE;
2318             irqfd.resamplefd = rfd;
2319         }
2320     } else if (!assign) {
2321         if (kvm_irqchip_is_split()) {
2322             kvm_resample_fd_remove(virq);
2323         }
2324     }
2325 
2326     return kvm_vm_ioctl(s, KVM_IRQFD, &irqfd);
2327 }
2328 
2329 #else /* !KVM_CAP_IRQ_ROUTING */
2330 
2331 void kvm_init_irq_routing(KVMState *s)
2332 {
2333 }
2334 
2335 void kvm_irqchip_release_virq(KVMState *s, int virq)
2336 {
2337 }
2338 
2339 int kvm_irqchip_send_msi(KVMState *s, MSIMessage msg)
2340 {
2341     abort();
2342 }
2343 
2344 int kvm_irqchip_add_msi_route(KVMRouteChange *c, int vector, PCIDevice *dev)
2345 {
2346     return -ENOSYS;
2347 }
2348 
2349 int kvm_irqchip_add_adapter_route(KVMState *s, AdapterInfo *adapter)
2350 {
2351     return -ENOSYS;
2352 }
2353 
2354 int kvm_irqchip_add_hv_sint_route(KVMState *s, uint32_t vcpu, uint32_t sint)
2355 {
2356     return -ENOSYS;
2357 }
2358 
2359 static int kvm_irqchip_assign_irqfd(KVMState *s, EventNotifier *event,
2360                                     EventNotifier *resample, int virq,
2361                                     bool assign)
2362 {
2363     abort();
2364 }
2365 
2366 int kvm_irqchip_update_msi_route(KVMState *s, int virq, MSIMessage msg)
2367 {
2368     return -ENOSYS;
2369 }
2370 #endif /* !KVM_CAP_IRQ_ROUTING */
2371 
2372 int kvm_irqchip_add_irqfd_notifier_gsi(KVMState *s, EventNotifier *n,
2373                                        EventNotifier *rn, int virq)
2374 {
2375     return kvm_irqchip_assign_irqfd(s, n, rn, virq, true);
2376 }
2377 
2378 int kvm_irqchip_remove_irqfd_notifier_gsi(KVMState *s, EventNotifier *n,
2379                                           int virq)
2380 {
2381     return kvm_irqchip_assign_irqfd(s, n, NULL, virq, false);
2382 }
2383 
2384 int kvm_irqchip_add_irqfd_notifier(KVMState *s, EventNotifier *n,
2385                                    EventNotifier *rn, qemu_irq irq)
2386 {
2387     gpointer key, gsi;
2388     gboolean found = g_hash_table_lookup_extended(s->gsimap, irq, &key, &gsi);
2389 
2390     if (!found) {
2391         return -ENXIO;
2392     }
2393     return kvm_irqchip_add_irqfd_notifier_gsi(s, n, rn, GPOINTER_TO_INT(gsi));
2394 }
2395 
2396 int kvm_irqchip_remove_irqfd_notifier(KVMState *s, EventNotifier *n,
2397                                       qemu_irq irq)
2398 {
2399     gpointer key, gsi;
2400     gboolean found = g_hash_table_lookup_extended(s->gsimap, irq, &key, &gsi);
2401 
2402     if (!found) {
2403         return -ENXIO;
2404     }
2405     return kvm_irqchip_remove_irqfd_notifier_gsi(s, n, GPOINTER_TO_INT(gsi));
2406 }
2407 
2408 void kvm_irqchip_set_qemuirq_gsi(KVMState *s, qemu_irq irq, int gsi)
2409 {
2410     g_hash_table_insert(s->gsimap, irq, GINT_TO_POINTER(gsi));
2411 }
2412 
2413 static void kvm_irqchip_create(KVMState *s)
2414 {
2415     int ret;
2416 
2417     assert(s->kernel_irqchip_split != ON_OFF_AUTO_AUTO);
2418     if (kvm_check_extension(s, KVM_CAP_IRQCHIP)) {
2419         ;
2420     } else if (kvm_check_extension(s, KVM_CAP_S390_IRQCHIP)) {
2421         ret = kvm_vm_enable_cap(s, KVM_CAP_S390_IRQCHIP, 0);
2422         if (ret < 0) {
2423             fprintf(stderr, "Enable kernel irqchip failed: %s\n", strerror(-ret));
2424             exit(1);
2425         }
2426     } else {
2427         return;
2428     }
2429 
2430     if (kvm_check_extension(s, KVM_CAP_IRQFD) <= 0) {
2431         fprintf(stderr, "kvm: irqfd not implemented\n");
2432         exit(1);
2433     }
2434 
2435     /* First probe and see if there's a arch-specific hook to create the
2436      * in-kernel irqchip for us */
2437     ret = kvm_arch_irqchip_create(s);
2438     if (ret == 0) {
2439         if (s->kernel_irqchip_split == ON_OFF_AUTO_ON) {
2440             error_report("Split IRQ chip mode not supported.");
2441             exit(1);
2442         } else {
2443             ret = kvm_vm_ioctl(s, KVM_CREATE_IRQCHIP);
2444         }
2445     }
2446     if (ret < 0) {
2447         fprintf(stderr, "Create kernel irqchip failed: %s\n", strerror(-ret));
2448         exit(1);
2449     }
2450 
2451     kvm_kernel_irqchip = true;
2452     /* If we have an in-kernel IRQ chip then we must have asynchronous
2453      * interrupt delivery (though the reverse is not necessarily true)
2454      */
2455     kvm_async_interrupts_allowed = true;
2456     kvm_halt_in_kernel_allowed = true;
2457 
2458     kvm_init_irq_routing(s);
2459 
2460     s->gsimap = g_hash_table_new(g_direct_hash, g_direct_equal);
2461 }
2462 
2463 /* Find number of supported CPUs using the recommended
2464  * procedure from the kernel API documentation to cope with
2465  * older kernels that may be missing capabilities.
2466  */
2467 static int kvm_recommended_vcpus(KVMState *s)
2468 {
2469     int ret = kvm_vm_check_extension(s, KVM_CAP_NR_VCPUS);
2470     return (ret) ? ret : 4;
2471 }
2472 
2473 static int kvm_max_vcpus(KVMState *s)
2474 {
2475     int ret = kvm_vm_check_extension(s, KVM_CAP_MAX_VCPUS);
2476     return (ret) ? ret : kvm_recommended_vcpus(s);
2477 }
2478 
2479 static int kvm_max_vcpu_id(KVMState *s)
2480 {
2481     int ret = kvm_check_extension(s, KVM_CAP_MAX_VCPU_ID);
2482     return (ret) ? ret : kvm_max_vcpus(s);
2483 }
2484 
2485 bool kvm_vcpu_id_is_valid(int vcpu_id)
2486 {
2487     KVMState *s = KVM_STATE(current_accel());
2488     return vcpu_id >= 0 && vcpu_id < kvm_max_vcpu_id(s);
2489 }
2490 
2491 bool kvm_dirty_ring_enabled(void)
2492 {
2493     return kvm_state && kvm_state->kvm_dirty_ring_size;
2494 }
2495 
2496 static void query_stats_cb(StatsResultList **result, StatsTarget target,
2497                            strList *names, strList *targets, Error **errp);
2498 static void query_stats_schemas_cb(StatsSchemaList **result, Error **errp);
2499 
2500 uint32_t kvm_dirty_ring_size(void)
2501 {
2502     return kvm_state->kvm_dirty_ring_size;
2503 }
2504 
2505 static int do_kvm_create_vm(KVMState *s, int type)
2506 {
2507     int ret;
2508 
2509     do {
2510         ret = kvm_ioctl(s, KVM_CREATE_VM, type);
2511     } while (ret == -EINTR);
2512 
2513     if (ret < 0) {
2514         error_report("ioctl(KVM_CREATE_VM) failed: %s", strerror(-ret));
2515 
2516 #ifdef TARGET_S390X
2517         if (ret == -EINVAL) {
2518             error_printf("Host kernel setup problem detected."
2519                          " Please verify:\n");
2520             error_printf("- for kernels supporting the"
2521                         " switch_amode or user_mode parameters, whether");
2522             error_printf(" user space is running in primary address space\n");
2523             error_printf("- for kernels supporting the vm.allocate_pgste"
2524                          " sysctl, whether it is enabled\n");
2525         }
2526 #elif defined(TARGET_PPC)
2527         if (ret == -EINVAL) {
2528             error_printf("PPC KVM module is not loaded. Try modprobe kvm_%s.\n",
2529                          (type == 2) ? "pr" : "hv");
2530         }
2531 #endif
2532     }
2533 
2534     return ret;
2535 }
2536 
2537 static int find_kvm_machine_type(MachineState *ms)
2538 {
2539     MachineClass *mc = MACHINE_GET_CLASS(ms);
2540     int type;
2541 
2542     if (object_property_find(OBJECT(current_machine), "kvm-type")) {
2543         g_autofree char *kvm_type;
2544         kvm_type = object_property_get_str(OBJECT(current_machine),
2545                                            "kvm-type",
2546                                            &error_abort);
2547         type = mc->kvm_type(ms, kvm_type);
2548     } else if (mc->kvm_type) {
2549         type = mc->kvm_type(ms, NULL);
2550     } else {
2551         type = kvm_arch_get_default_type(ms);
2552     }
2553     return type;
2554 }
2555 
2556 static int kvm_setup_dirty_ring(KVMState *s)
2557 {
2558     uint64_t dirty_log_manual_caps;
2559     int ret;
2560 
2561     /*
2562      * Enable KVM dirty ring if supported, otherwise fall back to
2563      * dirty logging mode
2564      */
2565     ret = kvm_dirty_ring_init(s);
2566     if (ret < 0) {
2567         return ret;
2568     }
2569 
2570     /*
2571      * KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 is not needed when dirty ring is
2572      * enabled.  More importantly, KVM_DIRTY_LOG_INITIALLY_SET will assume no
2573      * page is wr-protected initially, which is against how kvm dirty ring is
2574      * usage - kvm dirty ring requires all pages are wr-protected at the very
2575      * beginning.  Enabling this feature for dirty ring causes data corruption.
2576      *
2577      * TODO: Without KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 and kvm clear dirty log,
2578      * we may expect a higher stall time when starting the migration.  In the
2579      * future we can enable KVM_CLEAR_DIRTY_LOG to work with dirty ring too:
2580      * instead of clearing dirty bit, it can be a way to explicitly wr-protect
2581      * guest pages.
2582      */
2583     if (!s->kvm_dirty_ring_size) {
2584         dirty_log_manual_caps =
2585             kvm_check_extension(s, KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2);
2586         dirty_log_manual_caps &= (KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE |
2587                                   KVM_DIRTY_LOG_INITIALLY_SET);
2588         s->manual_dirty_log_protect = dirty_log_manual_caps;
2589         if (dirty_log_manual_caps) {
2590             ret = kvm_vm_enable_cap(s, KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2, 0,
2591                                     dirty_log_manual_caps);
2592             if (ret) {
2593                 warn_report("Trying to enable capability %"PRIu64" of "
2594                             "KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 but failed. "
2595                             "Falling back to the legacy mode. ",
2596                             dirty_log_manual_caps);
2597                 s->manual_dirty_log_protect = 0;
2598             }
2599         }
2600     }
2601 
2602     return 0;
2603 }
2604 
2605 static int kvm_init(AccelState *as, MachineState *ms)
2606 {
2607     MachineClass *mc = MACHINE_GET_CLASS(ms);
2608     static const char upgrade_note[] =
2609         "Please upgrade to at least kernel 4.5.\n";
2610     const struct {
2611         const char *name;
2612         int num;
2613     } num_cpus[] = {
2614         { "SMP",          ms->smp.cpus },
2615         { "hotpluggable", ms->smp.max_cpus },
2616         { /* end of list */ }
2617     }, *nc = num_cpus;
2618     int soft_vcpus_limit, hard_vcpus_limit;
2619     KVMState *s = KVM_STATE(as);
2620     const KVMCapabilityInfo *missing_cap;
2621     int ret;
2622     int type;
2623 
2624     qemu_mutex_init(&kml_slots_lock);
2625 
2626     /*
2627      * On systems where the kernel can support different base page
2628      * sizes, host page size may be different from TARGET_PAGE_SIZE,
2629      * even with KVM.  TARGET_PAGE_SIZE is assumed to be the minimum
2630      * page size for the system though.
2631      */
2632     assert(TARGET_PAGE_SIZE <= qemu_real_host_page_size());
2633 
2634     s->sigmask_len = 8;
2635     accel_blocker_init();
2636 
2637 #ifdef TARGET_KVM_HAVE_GUEST_DEBUG
2638     QTAILQ_INIT(&s->kvm_sw_breakpoints);
2639 #endif
2640     QLIST_INIT(&s->kvm_parked_vcpus);
2641     s->fd = qemu_open_old(s->device ?: "/dev/kvm", O_RDWR);
2642     if (s->fd == -1) {
2643         error_report("Could not access KVM kernel module: %m");
2644         ret = -errno;
2645         goto err;
2646     }
2647 
2648     ret = kvm_ioctl(s, KVM_GET_API_VERSION, 0);
2649     if (ret < KVM_API_VERSION) {
2650         if (ret >= 0) {
2651             ret = -EINVAL;
2652         }
2653         error_report("kvm version too old");
2654         goto err;
2655     }
2656 
2657     if (ret > KVM_API_VERSION) {
2658         ret = -EINVAL;
2659         error_report("kvm version not supported");
2660         goto err;
2661     }
2662 
2663     kvm_immediate_exit = kvm_check_extension(s, KVM_CAP_IMMEDIATE_EXIT);
2664     s->nr_slots_max = kvm_check_extension(s, KVM_CAP_NR_MEMSLOTS);
2665 
2666     /* If unspecified, use the default value */
2667     if (!s->nr_slots_max) {
2668         s->nr_slots_max = KVM_MEMSLOTS_NR_MAX_DEFAULT;
2669     }
2670 
2671     type = find_kvm_machine_type(ms);
2672     if (type < 0) {
2673         ret = -EINVAL;
2674         goto err;
2675     }
2676 
2677     ret = do_kvm_create_vm(s, type);
2678     if (ret < 0) {
2679         goto err;
2680     }
2681 
2682     s->vmfd = ret;
2683 
2684     s->nr_as = kvm_vm_check_extension(s, KVM_CAP_MULTI_ADDRESS_SPACE);
2685     if (s->nr_as <= 1) {
2686         s->nr_as = 1;
2687     }
2688     s->as = g_new0(struct KVMAs, s->nr_as);
2689 
2690     /* check the vcpu limits */
2691     soft_vcpus_limit = kvm_recommended_vcpus(s);
2692     hard_vcpus_limit = kvm_max_vcpus(s);
2693 
2694     while (nc->name) {
2695         if (nc->num > soft_vcpus_limit) {
2696             warn_report("Number of %s cpus requested (%d) exceeds "
2697                         "the recommended cpus supported by KVM (%d)",
2698                         nc->name, nc->num, soft_vcpus_limit);
2699 
2700             if (nc->num > hard_vcpus_limit) {
2701                 error_report("Number of %s cpus requested (%d) exceeds "
2702                              "the maximum cpus supported by KVM (%d)",
2703                              nc->name, nc->num, hard_vcpus_limit);
2704                 exit(1);
2705             }
2706         }
2707         nc++;
2708     }
2709 
2710     missing_cap = kvm_check_extension_list(s, kvm_required_capabilites);
2711     if (!missing_cap) {
2712         missing_cap =
2713             kvm_check_extension_list(s, kvm_arch_required_capabilities);
2714     }
2715     if (missing_cap) {
2716         ret = -EINVAL;
2717         error_report("kvm does not support %s", missing_cap->name);
2718         error_printf("%s", upgrade_note);
2719         goto err;
2720     }
2721 
2722     s->coalesced_mmio = kvm_check_extension(s, KVM_CAP_COALESCED_MMIO);
2723     s->coalesced_pio = s->coalesced_mmio &&
2724                        kvm_check_extension(s, KVM_CAP_COALESCED_PIO);
2725 
2726     ret = kvm_setup_dirty_ring(s);
2727     if (ret < 0) {
2728         goto err;
2729     }
2730 
2731 #ifdef KVM_CAP_VCPU_EVENTS
2732     s->vcpu_events = kvm_check_extension(s, KVM_CAP_VCPU_EVENTS);
2733 #endif
2734     s->max_nested_state_len = kvm_check_extension(s, KVM_CAP_NESTED_STATE);
2735 
2736     s->irq_set_ioctl = KVM_IRQ_LINE;
2737     if (kvm_check_extension(s, KVM_CAP_IRQ_INJECT_STATUS)) {
2738         s->irq_set_ioctl = KVM_IRQ_LINE_STATUS;
2739     }
2740 
2741     kvm_readonly_mem_allowed =
2742         (kvm_vm_check_extension(s, KVM_CAP_READONLY_MEM) > 0);
2743 
2744     kvm_resamplefds_allowed =
2745         (kvm_check_extension(s, KVM_CAP_IRQFD_RESAMPLE) > 0);
2746 
2747     kvm_vm_attributes_allowed =
2748         (kvm_check_extension(s, KVM_CAP_VM_ATTRIBUTES) > 0);
2749 
2750 #ifdef TARGET_KVM_HAVE_GUEST_DEBUG
2751     kvm_has_guest_debug =
2752         (kvm_check_extension(s, KVM_CAP_SET_GUEST_DEBUG) > 0);
2753 #endif
2754 
2755     kvm_sstep_flags = 0;
2756     if (kvm_has_guest_debug) {
2757         kvm_sstep_flags = SSTEP_ENABLE;
2758 
2759 #if defined TARGET_KVM_HAVE_GUEST_DEBUG
2760         int guest_debug_flags =
2761             kvm_check_extension(s, KVM_CAP_SET_GUEST_DEBUG2);
2762 
2763         if (guest_debug_flags & KVM_GUESTDBG_BLOCKIRQ) {
2764             kvm_sstep_flags |= SSTEP_NOIRQ;
2765         }
2766 #endif
2767     }
2768 
2769     kvm_state = s;
2770 
2771     ret = kvm_arch_init(ms, s);
2772     if (ret < 0) {
2773         goto err;
2774     }
2775 
2776     kvm_supported_memory_attributes = kvm_vm_check_extension(s, KVM_CAP_MEMORY_ATTRIBUTES);
2777     kvm_guest_memfd_supported =
2778         kvm_check_extension(s, KVM_CAP_GUEST_MEMFD) &&
2779         kvm_check_extension(s, KVM_CAP_USER_MEMORY2) &&
2780         (kvm_supported_memory_attributes & KVM_MEMORY_ATTRIBUTE_PRIVATE);
2781     kvm_pre_fault_memory_supported = kvm_vm_check_extension(s, KVM_CAP_PRE_FAULT_MEMORY);
2782 
2783     if (s->kernel_irqchip_split == ON_OFF_AUTO_AUTO) {
2784         s->kernel_irqchip_split = mc->default_kernel_irqchip_split ? ON_OFF_AUTO_ON : ON_OFF_AUTO_OFF;
2785     }
2786 
2787     qemu_register_reset(kvm_unpoison_all, NULL);
2788 
2789     if (s->kernel_irqchip_allowed) {
2790         kvm_irqchip_create(s);
2791     }
2792 
2793     s->memory_listener.listener.eventfd_add = kvm_mem_ioeventfd_add;
2794     s->memory_listener.listener.eventfd_del = kvm_mem_ioeventfd_del;
2795     s->memory_listener.listener.coalesced_io_add = kvm_coalesce_mmio_region;
2796     s->memory_listener.listener.coalesced_io_del = kvm_uncoalesce_mmio_region;
2797 
2798     kvm_memory_listener_register(s, &s->memory_listener,
2799                                  &address_space_memory, 0, "kvm-memory");
2800     memory_listener_register(&kvm_io_listener,
2801                              &address_space_io);
2802 
2803     s->sync_mmu = !!kvm_vm_check_extension(kvm_state, KVM_CAP_SYNC_MMU);
2804     if (!s->sync_mmu) {
2805         ret = ram_block_discard_disable(true);
2806         assert(!ret);
2807     }
2808 
2809     if (s->kvm_dirty_ring_size) {
2810         kvm_dirty_ring_reaper_init(s);
2811     }
2812 
2813     if (kvm_check_extension(kvm_state, KVM_CAP_BINARY_STATS_FD)) {
2814         add_stats_callbacks(STATS_PROVIDER_KVM, query_stats_cb,
2815                             query_stats_schemas_cb);
2816     }
2817 
2818     return 0;
2819 
2820 err:
2821     assert(ret < 0);
2822     if (s->vmfd >= 0) {
2823         close(s->vmfd);
2824     }
2825     if (s->fd != -1) {
2826         close(s->fd);
2827     }
2828     g_free(s->as);
2829     g_free(s->memory_listener.slots);
2830 
2831     return ret;
2832 }
2833 
2834 void kvm_set_sigmask_len(KVMState *s, unsigned int sigmask_len)
2835 {
2836     s->sigmask_len = sigmask_len;
2837 }
2838 
2839 static void kvm_handle_io(uint16_t port, MemTxAttrs attrs, void *data, int direction,
2840                           int size, uint32_t count)
2841 {
2842     int i;
2843     uint8_t *ptr = data;
2844 
2845     for (i = 0; i < count; i++) {
2846         address_space_rw(&address_space_io, port, attrs,
2847                          ptr, size,
2848                          direction == KVM_EXIT_IO_OUT);
2849         ptr += size;
2850     }
2851 }
2852 
2853 static int kvm_handle_internal_error(CPUState *cpu, struct kvm_run *run)
2854 {
2855     int i;
2856 
2857     fprintf(stderr, "KVM internal error. Suberror: %d\n",
2858             run->internal.suberror);
2859 
2860     for (i = 0; i < run->internal.ndata; ++i) {
2861         fprintf(stderr, "extra data[%d]: 0x%016"PRIx64"\n",
2862                 i, (uint64_t)run->internal.data[i]);
2863     }
2864     if (run->internal.suberror == KVM_INTERNAL_ERROR_EMULATION) {
2865         fprintf(stderr, "emulation failure\n");
2866         if (!kvm_arch_stop_on_emulation_error(cpu)) {
2867             cpu_dump_state(cpu, stderr, CPU_DUMP_CODE);
2868             return EXCP_INTERRUPT;
2869         }
2870     }
2871     /* FIXME: Should trigger a qmp message to let management know
2872      * something went wrong.
2873      */
2874     return -1;
2875 }
2876 
2877 void kvm_flush_coalesced_mmio_buffer(void)
2878 {
2879     KVMState *s = kvm_state;
2880 
2881     if (!s || s->coalesced_flush_in_progress) {
2882         return;
2883     }
2884 
2885     s->coalesced_flush_in_progress = true;
2886 
2887     if (s->coalesced_mmio_ring) {
2888         struct kvm_coalesced_mmio_ring *ring = s->coalesced_mmio_ring;
2889         while (ring->first != ring->last) {
2890             struct kvm_coalesced_mmio *ent;
2891 
2892             ent = &ring->coalesced_mmio[ring->first];
2893 
2894             if (ent->pio == 1) {
2895                 address_space_write(&address_space_io, ent->phys_addr,
2896                                     MEMTXATTRS_UNSPECIFIED, ent->data,
2897                                     ent->len);
2898             } else {
2899                 cpu_physical_memory_write(ent->phys_addr, ent->data, ent->len);
2900             }
2901             smp_wmb();
2902             ring->first = (ring->first + 1) % KVM_COALESCED_MMIO_MAX;
2903         }
2904     }
2905 
2906     s->coalesced_flush_in_progress = false;
2907 }
2908 
2909 static void do_kvm_cpu_synchronize_state(CPUState *cpu, run_on_cpu_data arg)
2910 {
2911     if (!cpu->vcpu_dirty && !kvm_state->guest_state_protected) {
2912         Error *err = NULL;
2913         int ret = kvm_arch_get_registers(cpu, &err);
2914         if (ret) {
2915             if (err) {
2916                 error_reportf_err(err, "Failed to synchronize CPU state: ");
2917             } else {
2918                 error_report("Failed to get registers: %s", strerror(-ret));
2919             }
2920 
2921             cpu_dump_state(cpu, stderr, CPU_DUMP_CODE);
2922             vm_stop(RUN_STATE_INTERNAL_ERROR);
2923         }
2924 
2925         cpu->vcpu_dirty = true;
2926     }
2927 }
2928 
2929 void kvm_cpu_synchronize_state(CPUState *cpu)
2930 {
2931     if (!cpu->vcpu_dirty && !kvm_state->guest_state_protected) {
2932         run_on_cpu(cpu, do_kvm_cpu_synchronize_state, RUN_ON_CPU_NULL);
2933     }
2934 }
2935 
2936 static void do_kvm_cpu_synchronize_post_reset(CPUState *cpu, run_on_cpu_data arg)
2937 {
2938     Error *err = NULL;
2939     int ret = kvm_arch_put_registers(cpu, KVM_PUT_RESET_STATE, &err);
2940     if (ret) {
2941         if (err) {
2942             error_reportf_err(err, "Restoring resisters after reset: ");
2943         } else {
2944             error_report("Failed to put registers after reset: %s",
2945                          strerror(-ret));
2946         }
2947         cpu_dump_state(cpu, stderr, CPU_DUMP_CODE);
2948         vm_stop(RUN_STATE_INTERNAL_ERROR);
2949     }
2950 
2951     cpu->vcpu_dirty = false;
2952 }
2953 
2954 void kvm_cpu_synchronize_post_reset(CPUState *cpu)
2955 {
2956     run_on_cpu(cpu, do_kvm_cpu_synchronize_post_reset, RUN_ON_CPU_NULL);
2957 
2958     if (cpu == first_cpu) {
2959         kvm_reset_parked_vcpus(kvm_state);
2960     }
2961 }
2962 
2963 static void do_kvm_cpu_synchronize_post_init(CPUState *cpu, run_on_cpu_data arg)
2964 {
2965     Error *err = NULL;
2966     int ret = kvm_arch_put_registers(cpu, KVM_PUT_FULL_STATE, &err);
2967     if (ret) {
2968         if (err) {
2969             error_reportf_err(err, "Putting registers after init: ");
2970         } else {
2971             error_report("Failed to put registers after init: %s",
2972                          strerror(-ret));
2973         }
2974         exit(1);
2975     }
2976 
2977     cpu->vcpu_dirty = false;
2978 }
2979 
2980 void kvm_cpu_synchronize_post_init(CPUState *cpu)
2981 {
2982     if (!kvm_state->guest_state_protected) {
2983         /*
2984          * This runs before the machine_init_done notifiers, and is the last
2985          * opportunity to synchronize the state of confidential guests.
2986          */
2987         run_on_cpu(cpu, do_kvm_cpu_synchronize_post_init, RUN_ON_CPU_NULL);
2988     }
2989 }
2990 
2991 static void do_kvm_cpu_synchronize_pre_loadvm(CPUState *cpu, run_on_cpu_data arg)
2992 {
2993     cpu->vcpu_dirty = true;
2994 }
2995 
2996 void kvm_cpu_synchronize_pre_loadvm(CPUState *cpu)
2997 {
2998     run_on_cpu(cpu, do_kvm_cpu_synchronize_pre_loadvm, RUN_ON_CPU_NULL);
2999 }
3000 
3001 #ifdef KVM_HAVE_MCE_INJECTION
3002 static __thread void *pending_sigbus_addr;
3003 static __thread int pending_sigbus_code;
3004 static __thread bool have_sigbus_pending;
3005 #endif
3006 
3007 static void kvm_cpu_kick(CPUState *cpu)
3008 {
3009     qatomic_set(&cpu->kvm_run->immediate_exit, 1);
3010 }
3011 
3012 static void kvm_cpu_kick_self(void)
3013 {
3014     if (kvm_immediate_exit) {
3015         kvm_cpu_kick(current_cpu);
3016     } else {
3017         qemu_cpu_kick_self();
3018     }
3019 }
3020 
3021 static void kvm_eat_signals(CPUState *cpu)
3022 {
3023     struct timespec ts = { 0, 0 };
3024     siginfo_t siginfo;
3025     sigset_t waitset;
3026     sigset_t chkset;
3027     int r;
3028 
3029     if (kvm_immediate_exit) {
3030         qatomic_set(&cpu->kvm_run->immediate_exit, 0);
3031         /* Write kvm_run->immediate_exit before the cpu->exit_request
3032          * write in kvm_cpu_exec.
3033          */
3034         smp_wmb();
3035         return;
3036     }
3037 
3038     sigemptyset(&waitset);
3039     sigaddset(&waitset, SIG_IPI);
3040 
3041     do {
3042         r = sigtimedwait(&waitset, &siginfo, &ts);
3043         if (r == -1 && !(errno == EAGAIN || errno == EINTR)) {
3044             perror("sigtimedwait");
3045             exit(1);
3046         }
3047 
3048         r = sigpending(&chkset);
3049         if (r == -1) {
3050             perror("sigpending");
3051             exit(1);
3052         }
3053     } while (sigismember(&chkset, SIG_IPI));
3054 }
3055 
3056 int kvm_convert_memory(hwaddr start, hwaddr size, bool to_private)
3057 {
3058     MemoryRegionSection section;
3059     ram_addr_t offset;
3060     MemoryRegion *mr;
3061     RAMBlock *rb;
3062     void *addr;
3063     int ret = -EINVAL;
3064 
3065     trace_kvm_convert_memory(start, size, to_private ? "shared_to_private" : "private_to_shared");
3066 
3067     if (!QEMU_PTR_IS_ALIGNED(start, qemu_real_host_page_size()) ||
3068         !QEMU_PTR_IS_ALIGNED(size, qemu_real_host_page_size())) {
3069         return ret;
3070     }
3071 
3072     if (!size) {
3073         return ret;
3074     }
3075 
3076     section = memory_region_find(get_system_memory(), start, size);
3077     mr = section.mr;
3078     if (!mr) {
3079         /*
3080          * Ignore converting non-assigned region to shared.
3081          *
3082          * TDX requires vMMIO region to be shared to inject #VE to guest.
3083          * OVMF issues conservatively MapGPA(shared) on 32bit PCI MMIO region,
3084          * and vIO-APIC 0xFEC00000 4K page.
3085          * OVMF assigns 32bit PCI MMIO region to
3086          * [top of low memory: typically 2GB=0xC000000,  0xFC00000)
3087          */
3088         if (!to_private) {
3089             return 0;
3090         }
3091         return ret;
3092     }
3093 
3094     if (!memory_region_has_guest_memfd(mr)) {
3095         /*
3096          * Because vMMIO region must be shared, guest TD may convert vMMIO
3097          * region to shared explicitly.  Don't complain such case.  See
3098          * memory_region_type() for checking if the region is MMIO region.
3099          */
3100         if (!to_private &&
3101             !memory_region_is_ram(mr) &&
3102             !memory_region_is_ram_device(mr) &&
3103             !memory_region_is_rom(mr) &&
3104             !memory_region_is_romd(mr)) {
3105             ret = 0;
3106         } else {
3107             error_report("Convert non guest_memfd backed memory region "
3108                         "(0x%"HWADDR_PRIx" ,+ 0x%"HWADDR_PRIx") to %s",
3109                         start, size, to_private ? "private" : "shared");
3110         }
3111         goto out_unref;
3112     }
3113 
3114     if (to_private) {
3115         ret = kvm_set_memory_attributes_private(start, size);
3116     } else {
3117         ret = kvm_set_memory_attributes_shared(start, size);
3118     }
3119     if (ret) {
3120         goto out_unref;
3121     }
3122 
3123     addr = memory_region_get_ram_ptr(mr) + section.offset_within_region;
3124     rb = qemu_ram_block_from_host(addr, false, &offset);
3125 
3126     ret = ram_block_attributes_state_change(RAM_BLOCK_ATTRIBUTES(mr->rdm),
3127                                             offset, size, to_private);
3128     if (ret) {
3129         error_report("Failed to notify the listener the state change of "
3130                      "(0x%"HWADDR_PRIx" + 0x%"HWADDR_PRIx") to %s",
3131                      start, size, to_private ? "private" : "shared");
3132         goto out_unref;
3133     }
3134 
3135     if (to_private) {
3136         if (rb->page_size != qemu_real_host_page_size()) {
3137             /*
3138              * shared memory is backed by hugetlb, which is supposed to be
3139              * pre-allocated and doesn't need to be discarded
3140              */
3141             goto out_unref;
3142         }
3143         ret = ram_block_discard_range(rb, offset, size);
3144     } else {
3145         ret = ram_block_discard_guest_memfd_range(rb, offset, size);
3146     }
3147 
3148 out_unref:
3149     memory_region_unref(mr);
3150     return ret;
3151 }
3152 
3153 int kvm_cpu_exec(CPUState *cpu)
3154 {
3155     struct kvm_run *run = cpu->kvm_run;
3156     int ret, run_ret;
3157 
3158     trace_kvm_cpu_exec();
3159 
3160     if (kvm_arch_process_async_events(cpu)) {
3161         qatomic_set(&cpu->exit_request, 0);
3162         return EXCP_HLT;
3163     }
3164 
3165     bql_unlock();
3166     cpu_exec_start(cpu);
3167 
3168     do {
3169         MemTxAttrs attrs;
3170 
3171         if (cpu->vcpu_dirty) {
3172             Error *err = NULL;
3173             ret = kvm_arch_put_registers(cpu, KVM_PUT_RUNTIME_STATE, &err);
3174             if (ret) {
3175                 if (err) {
3176                     error_reportf_err(err, "Putting registers after init: ");
3177                 } else {
3178                     error_report("Failed to put registers after init: %s",
3179                                  strerror(-ret));
3180                 }
3181                 ret = -1;
3182                 break;
3183             }
3184 
3185             cpu->vcpu_dirty = false;
3186         }
3187 
3188         kvm_arch_pre_run(cpu, run);
3189         if (qatomic_read(&cpu->exit_request)) {
3190             trace_kvm_interrupt_exit_request();
3191             /*
3192              * KVM requires us to reenter the kernel after IO exits to complete
3193              * instruction emulation. This self-signal will ensure that we
3194              * leave ASAP again.
3195              */
3196             kvm_cpu_kick_self();
3197         }
3198 
3199         /* Read cpu->exit_request before KVM_RUN reads run->immediate_exit.
3200          * Matching barrier in kvm_eat_signals.
3201          */
3202         smp_rmb();
3203 
3204         run_ret = kvm_vcpu_ioctl(cpu, KVM_RUN, 0);
3205 
3206         attrs = kvm_arch_post_run(cpu, run);
3207 
3208 #ifdef KVM_HAVE_MCE_INJECTION
3209         if (unlikely(have_sigbus_pending)) {
3210             bql_lock();
3211             kvm_arch_on_sigbus_vcpu(cpu, pending_sigbus_code,
3212                                     pending_sigbus_addr);
3213             have_sigbus_pending = false;
3214             bql_unlock();
3215         }
3216 #endif
3217 
3218         if (run_ret < 0) {
3219             if (run_ret == -EINTR || run_ret == -EAGAIN) {
3220                 trace_kvm_io_window_exit();
3221                 kvm_eat_signals(cpu);
3222                 ret = EXCP_INTERRUPT;
3223                 break;
3224             }
3225             if (!(run_ret == -EFAULT && run->exit_reason == KVM_EXIT_MEMORY_FAULT)) {
3226                 fprintf(stderr, "error: kvm run failed %s\n",
3227                         strerror(-run_ret));
3228 #ifdef TARGET_PPC
3229                 if (run_ret == -EBUSY) {
3230                     fprintf(stderr,
3231                             "This is probably because your SMT is enabled.\n"
3232                             "VCPU can only run on primary threads with all "
3233                             "secondary threads offline.\n");
3234                 }
3235 #endif
3236                 ret = -1;
3237                 break;
3238             }
3239         }
3240 
3241         trace_kvm_run_exit(cpu->cpu_index, run->exit_reason);
3242         switch (run->exit_reason) {
3243         case KVM_EXIT_IO:
3244             /* Called outside BQL */
3245             kvm_handle_io(run->io.port, attrs,
3246                           (uint8_t *)run + run->io.data_offset,
3247                           run->io.direction,
3248                           run->io.size,
3249                           run->io.count);
3250             ret = 0;
3251             break;
3252         case KVM_EXIT_MMIO:
3253             /* Called outside BQL */
3254             address_space_rw(&address_space_memory,
3255                              run->mmio.phys_addr, attrs,
3256                              run->mmio.data,
3257                              run->mmio.len,
3258                              run->mmio.is_write);
3259             ret = 0;
3260             break;
3261         case KVM_EXIT_IRQ_WINDOW_OPEN:
3262             ret = EXCP_INTERRUPT;
3263             break;
3264         case KVM_EXIT_SHUTDOWN:
3265             qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
3266             ret = EXCP_INTERRUPT;
3267             break;
3268         case KVM_EXIT_UNKNOWN:
3269             fprintf(stderr, "KVM: unknown exit, hardware reason %" PRIx64 "\n",
3270                     (uint64_t)run->hw.hardware_exit_reason);
3271             ret = -1;
3272             break;
3273         case KVM_EXIT_INTERNAL_ERROR:
3274             ret = kvm_handle_internal_error(cpu, run);
3275             break;
3276         case KVM_EXIT_DIRTY_RING_FULL:
3277             /*
3278              * We shouldn't continue if the dirty ring of this vcpu is
3279              * still full.  Got kicked by KVM_RESET_DIRTY_RINGS.
3280              */
3281             trace_kvm_dirty_ring_full(cpu->cpu_index);
3282             bql_lock();
3283             /*
3284              * We throttle vCPU by making it sleep once it exit from kernel
3285              * due to dirty ring full. In the dirtylimit scenario, reaping
3286              * all vCPUs after a single vCPU dirty ring get full result in
3287              * the miss of sleep, so just reap the ring-fulled vCPU.
3288              */
3289             if (dirtylimit_in_service()) {
3290                 kvm_dirty_ring_reap(kvm_state, cpu);
3291             } else {
3292                 kvm_dirty_ring_reap(kvm_state, NULL);
3293             }
3294             bql_unlock();
3295             dirtylimit_vcpu_execute(cpu);
3296             ret = 0;
3297             break;
3298         case KVM_EXIT_SYSTEM_EVENT:
3299             trace_kvm_run_exit_system_event(cpu->cpu_index, run->system_event.type);
3300             switch (run->system_event.type) {
3301             case KVM_SYSTEM_EVENT_SHUTDOWN:
3302                 qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN);
3303                 ret = EXCP_INTERRUPT;
3304                 break;
3305             case KVM_SYSTEM_EVENT_RESET:
3306                 qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
3307                 ret = EXCP_INTERRUPT;
3308                 break;
3309             case KVM_SYSTEM_EVENT_CRASH:
3310                 kvm_cpu_synchronize_state(cpu);
3311                 bql_lock();
3312                 qemu_system_guest_panicked(cpu_get_crash_info(cpu));
3313                 bql_unlock();
3314                 ret = 0;
3315                 break;
3316             default:
3317                 ret = kvm_arch_handle_exit(cpu, run);
3318                 break;
3319             }
3320             break;
3321         case KVM_EXIT_MEMORY_FAULT:
3322             trace_kvm_memory_fault(run->memory_fault.gpa,
3323                                    run->memory_fault.size,
3324                                    run->memory_fault.flags);
3325             if (run->memory_fault.flags & ~KVM_MEMORY_EXIT_FLAG_PRIVATE) {
3326                 error_report("KVM_EXIT_MEMORY_FAULT: Unknown flag 0x%" PRIx64,
3327                              (uint64_t)run->memory_fault.flags);
3328                 ret = -1;
3329                 break;
3330             }
3331             ret = kvm_convert_memory(run->memory_fault.gpa, run->memory_fault.size,
3332                                      run->memory_fault.flags & KVM_MEMORY_EXIT_FLAG_PRIVATE);
3333             break;
3334         default:
3335             ret = kvm_arch_handle_exit(cpu, run);
3336             break;
3337         }
3338     } while (ret == 0);
3339 
3340     cpu_exec_end(cpu);
3341     bql_lock();
3342 
3343     if (ret < 0) {
3344         cpu_dump_state(cpu, stderr, CPU_DUMP_CODE);
3345         vm_stop(RUN_STATE_INTERNAL_ERROR);
3346     }
3347 
3348     qatomic_set(&cpu->exit_request, 0);
3349     return ret;
3350 }
3351 
3352 int kvm_ioctl(KVMState *s, unsigned long type, ...)
3353 {
3354     int ret;
3355     void *arg;
3356     va_list ap;
3357 
3358     va_start(ap, type);
3359     arg = va_arg(ap, void *);
3360     va_end(ap);
3361 
3362     trace_kvm_ioctl(type, arg);
3363     ret = ioctl(s->fd, type, arg);
3364     if (ret == -1) {
3365         ret = -errno;
3366     }
3367     return ret;
3368 }
3369 
3370 int kvm_vm_ioctl(KVMState *s, unsigned long type, ...)
3371 {
3372     int ret;
3373     void *arg;
3374     va_list ap;
3375 
3376     va_start(ap, type);
3377     arg = va_arg(ap, void *);
3378     va_end(ap);
3379 
3380     trace_kvm_vm_ioctl(type, arg);
3381     accel_ioctl_begin();
3382     ret = ioctl(s->vmfd, type, arg);
3383     accel_ioctl_end();
3384     if (ret == -1) {
3385         ret = -errno;
3386     }
3387     return ret;
3388 }
3389 
3390 int kvm_vcpu_ioctl(CPUState *cpu, unsigned long type, ...)
3391 {
3392     int ret;
3393     void *arg;
3394     va_list ap;
3395 
3396     va_start(ap, type);
3397     arg = va_arg(ap, void *);
3398     va_end(ap);
3399 
3400     trace_kvm_vcpu_ioctl(cpu->cpu_index, type, arg);
3401     accel_cpu_ioctl_begin(cpu);
3402     ret = ioctl(cpu->kvm_fd, type, arg);
3403     accel_cpu_ioctl_end(cpu);
3404     if (ret == -1) {
3405         ret = -errno;
3406     }
3407     return ret;
3408 }
3409 
3410 int kvm_device_ioctl(int fd, unsigned long type, ...)
3411 {
3412     int ret;
3413     void *arg;
3414     va_list ap;
3415 
3416     va_start(ap, type);
3417     arg = va_arg(ap, void *);
3418     va_end(ap);
3419 
3420     trace_kvm_device_ioctl(fd, type, arg);
3421     accel_ioctl_begin();
3422     ret = ioctl(fd, type, arg);
3423     accel_ioctl_end();
3424     if (ret == -1) {
3425         ret = -errno;
3426     }
3427     return ret;
3428 }
3429 
3430 int kvm_vm_check_attr(KVMState *s, uint32_t group, uint64_t attr)
3431 {
3432     int ret;
3433     struct kvm_device_attr attribute = {
3434         .group = group,
3435         .attr = attr,
3436     };
3437 
3438     if (!kvm_vm_attributes_allowed) {
3439         return 0;
3440     }
3441 
3442     ret = kvm_vm_ioctl(s, KVM_HAS_DEVICE_ATTR, &attribute);
3443     /* kvm returns 0 on success for HAS_DEVICE_ATTR */
3444     return ret ? 0 : 1;
3445 }
3446 
3447 int kvm_device_check_attr(int dev_fd, uint32_t group, uint64_t attr)
3448 {
3449     struct kvm_device_attr attribute = {
3450         .group = group,
3451         .attr = attr,
3452         .flags = 0,
3453     };
3454 
3455     return kvm_device_ioctl(dev_fd, KVM_HAS_DEVICE_ATTR, &attribute) ? 0 : 1;
3456 }
3457 
3458 int kvm_device_access(int fd, int group, uint64_t attr,
3459                       void *val, bool write, Error **errp)
3460 {
3461     struct kvm_device_attr kvmattr;
3462     int err;
3463 
3464     kvmattr.flags = 0;
3465     kvmattr.group = group;
3466     kvmattr.attr = attr;
3467     kvmattr.addr = (uintptr_t)val;
3468 
3469     err = kvm_device_ioctl(fd,
3470                            write ? KVM_SET_DEVICE_ATTR : KVM_GET_DEVICE_ATTR,
3471                            &kvmattr);
3472     if (err < 0) {
3473         error_setg_errno(errp, -err,
3474                          "KVM_%s_DEVICE_ATTR failed: Group %d "
3475                          "attr 0x%016" PRIx64,
3476                          write ? "SET" : "GET", group, attr);
3477     }
3478     return err;
3479 }
3480 
3481 bool kvm_has_sync_mmu(void)
3482 {
3483     return kvm_state->sync_mmu;
3484 }
3485 
3486 int kvm_has_vcpu_events(void)
3487 {
3488     return kvm_state->vcpu_events;
3489 }
3490 
3491 int kvm_max_nested_state_length(void)
3492 {
3493     return kvm_state->max_nested_state_len;
3494 }
3495 
3496 int kvm_has_gsi_routing(void)
3497 {
3498 #ifdef KVM_CAP_IRQ_ROUTING
3499     return kvm_check_extension(kvm_state, KVM_CAP_IRQ_ROUTING);
3500 #else
3501     return false;
3502 #endif
3503 }
3504 
3505 bool kvm_arm_supports_user_irq(void)
3506 {
3507     return kvm_check_extension(kvm_state, KVM_CAP_ARM_USER_IRQ);
3508 }
3509 
3510 #ifdef TARGET_KVM_HAVE_GUEST_DEBUG
3511 struct kvm_sw_breakpoint *kvm_find_sw_breakpoint(CPUState *cpu, vaddr pc)
3512 {
3513     struct kvm_sw_breakpoint *bp;
3514 
3515     QTAILQ_FOREACH(bp, &cpu->kvm_state->kvm_sw_breakpoints, entry) {
3516         if (bp->pc == pc) {
3517             return bp;
3518         }
3519     }
3520     return NULL;
3521 }
3522 
3523 int kvm_sw_breakpoints_active(CPUState *cpu)
3524 {
3525     return !QTAILQ_EMPTY(&cpu->kvm_state->kvm_sw_breakpoints);
3526 }
3527 
3528 struct kvm_set_guest_debug_data {
3529     struct kvm_guest_debug dbg;
3530     int err;
3531 };
3532 
3533 static void kvm_invoke_set_guest_debug(CPUState *cpu, run_on_cpu_data data)
3534 {
3535     struct kvm_set_guest_debug_data *dbg_data =
3536         (struct kvm_set_guest_debug_data *) data.host_ptr;
3537 
3538     dbg_data->err = kvm_vcpu_ioctl(cpu, KVM_SET_GUEST_DEBUG,
3539                                    &dbg_data->dbg);
3540 }
3541 
3542 int kvm_update_guest_debug(CPUState *cpu, unsigned long reinject_trap)
3543 {
3544     struct kvm_set_guest_debug_data data;
3545 
3546     data.dbg.control = reinject_trap;
3547 
3548     if (cpu->singlestep_enabled) {
3549         data.dbg.control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP;
3550 
3551         if (cpu->singlestep_enabled & SSTEP_NOIRQ) {
3552             data.dbg.control |= KVM_GUESTDBG_BLOCKIRQ;
3553         }
3554     }
3555     kvm_arch_update_guest_debug(cpu, &data.dbg);
3556 
3557     run_on_cpu(cpu, kvm_invoke_set_guest_debug,
3558                RUN_ON_CPU_HOST_PTR(&data));
3559     return data.err;
3560 }
3561 
3562 bool kvm_supports_guest_debug(void)
3563 {
3564     /* probed during kvm_init() */
3565     return kvm_has_guest_debug;
3566 }
3567 
3568 int kvm_insert_breakpoint(CPUState *cpu, int type, vaddr addr, vaddr len)
3569 {
3570     struct kvm_sw_breakpoint *bp;
3571     int err;
3572 
3573     if (type == GDB_BREAKPOINT_SW) {
3574         bp = kvm_find_sw_breakpoint(cpu, addr);
3575         if (bp) {
3576             bp->use_count++;
3577             return 0;
3578         }
3579 
3580         bp = g_new(struct kvm_sw_breakpoint, 1);
3581         bp->pc = addr;
3582         bp->use_count = 1;
3583         err = kvm_arch_insert_sw_breakpoint(cpu, bp);
3584         if (err) {
3585             g_free(bp);
3586             return err;
3587         }
3588 
3589         QTAILQ_INSERT_HEAD(&cpu->kvm_state->kvm_sw_breakpoints, bp, entry);
3590     } else {
3591         err = kvm_arch_insert_hw_breakpoint(addr, len, type);
3592         if (err) {
3593             return err;
3594         }
3595     }
3596 
3597     CPU_FOREACH(cpu) {
3598         err = kvm_update_guest_debug(cpu, 0);
3599         if (err) {
3600             return err;
3601         }
3602     }
3603     return 0;
3604 }
3605 
3606 int kvm_remove_breakpoint(CPUState *cpu, int type, vaddr addr, vaddr len)
3607 {
3608     struct kvm_sw_breakpoint *bp;
3609     int err;
3610 
3611     if (type == GDB_BREAKPOINT_SW) {
3612         bp = kvm_find_sw_breakpoint(cpu, addr);
3613         if (!bp) {
3614             return -ENOENT;
3615         }
3616 
3617         if (bp->use_count > 1) {
3618             bp->use_count--;
3619             return 0;
3620         }
3621 
3622         err = kvm_arch_remove_sw_breakpoint(cpu, bp);
3623         if (err) {
3624             return err;
3625         }
3626 
3627         QTAILQ_REMOVE(&cpu->kvm_state->kvm_sw_breakpoints, bp, entry);
3628         g_free(bp);
3629     } else {
3630         err = kvm_arch_remove_hw_breakpoint(addr, len, type);
3631         if (err) {
3632             return err;
3633         }
3634     }
3635 
3636     CPU_FOREACH(cpu) {
3637         err = kvm_update_guest_debug(cpu, 0);
3638         if (err) {
3639             return err;
3640         }
3641     }
3642     return 0;
3643 }
3644 
3645 void kvm_remove_all_breakpoints(CPUState *cpu)
3646 {
3647     struct kvm_sw_breakpoint *bp, *next;
3648     KVMState *s = cpu->kvm_state;
3649     CPUState *tmpcpu;
3650 
3651     QTAILQ_FOREACH_SAFE(bp, &s->kvm_sw_breakpoints, entry, next) {
3652         if (kvm_arch_remove_sw_breakpoint(cpu, bp) != 0) {
3653             /* Try harder to find a CPU that currently sees the breakpoint. */
3654             CPU_FOREACH(tmpcpu) {
3655                 if (kvm_arch_remove_sw_breakpoint(tmpcpu, bp) == 0) {
3656                     break;
3657                 }
3658             }
3659         }
3660         QTAILQ_REMOVE(&s->kvm_sw_breakpoints, bp, entry);
3661         g_free(bp);
3662     }
3663     kvm_arch_remove_all_hw_breakpoints();
3664 
3665     CPU_FOREACH(cpu) {
3666         kvm_update_guest_debug(cpu, 0);
3667     }
3668 }
3669 
3670 #endif /* !TARGET_KVM_HAVE_GUEST_DEBUG */
3671 
3672 static int kvm_set_signal_mask(CPUState *cpu, const sigset_t *sigset)
3673 {
3674     KVMState *s = kvm_state;
3675     struct kvm_signal_mask *sigmask;
3676     int r;
3677 
3678     sigmask = g_malloc(sizeof(*sigmask) + sizeof(*sigset));
3679 
3680     sigmask->len = s->sigmask_len;
3681     memcpy(sigmask->sigset, sigset, sizeof(*sigset));
3682     r = kvm_vcpu_ioctl(cpu, KVM_SET_SIGNAL_MASK, sigmask);
3683     g_free(sigmask);
3684 
3685     return r;
3686 }
3687 
3688 static void kvm_ipi_signal(int sig)
3689 {
3690     if (current_cpu) {
3691         assert(kvm_immediate_exit);
3692         kvm_cpu_kick(current_cpu);
3693     }
3694 }
3695 
3696 void kvm_init_cpu_signals(CPUState *cpu)
3697 {
3698     int r;
3699     sigset_t set;
3700     struct sigaction sigact;
3701 
3702     memset(&sigact, 0, sizeof(sigact));
3703     sigact.sa_handler = kvm_ipi_signal;
3704     sigaction(SIG_IPI, &sigact, NULL);
3705 
3706     pthread_sigmask(SIG_BLOCK, NULL, &set);
3707 #if defined KVM_HAVE_MCE_INJECTION
3708     sigdelset(&set, SIGBUS);
3709     pthread_sigmask(SIG_SETMASK, &set, NULL);
3710 #endif
3711     sigdelset(&set, SIG_IPI);
3712     if (kvm_immediate_exit) {
3713         r = pthread_sigmask(SIG_SETMASK, &set, NULL);
3714     } else {
3715         r = kvm_set_signal_mask(cpu, &set);
3716     }
3717     if (r) {
3718         fprintf(stderr, "kvm_set_signal_mask: %s\n", strerror(-r));
3719         exit(1);
3720     }
3721 }
3722 
3723 /* Called asynchronously in VCPU thread.  */
3724 int kvm_on_sigbus_vcpu(CPUState *cpu, int code, void *addr)
3725 {
3726 #ifdef KVM_HAVE_MCE_INJECTION
3727     if (have_sigbus_pending) {
3728         return 1;
3729     }
3730     have_sigbus_pending = true;
3731     pending_sigbus_addr = addr;
3732     pending_sigbus_code = code;
3733     qatomic_set(&cpu->exit_request, 1);
3734     return 0;
3735 #else
3736     return 1;
3737 #endif
3738 }
3739 
3740 /* Called synchronously (via signalfd) in main thread.  */
3741 int kvm_on_sigbus(int code, void *addr)
3742 {
3743 #ifdef KVM_HAVE_MCE_INJECTION
3744     /* Action required MCE kills the process if SIGBUS is blocked.  Because
3745      * that's what happens in the I/O thread, where we handle MCE via signalfd,
3746      * we can only get action optional here.
3747      */
3748     assert(code != BUS_MCEERR_AR);
3749     kvm_arch_on_sigbus_vcpu(first_cpu, code, addr);
3750     return 0;
3751 #else
3752     return 1;
3753 #endif
3754 }
3755 
3756 int kvm_create_device(KVMState *s, uint64_t type, bool test)
3757 {
3758     int ret;
3759     struct kvm_create_device create_dev;
3760 
3761     create_dev.type = type;
3762     create_dev.fd = -1;
3763     create_dev.flags = test ? KVM_CREATE_DEVICE_TEST : 0;
3764 
3765     if (!kvm_check_extension(s, KVM_CAP_DEVICE_CTRL)) {
3766         return -ENOTSUP;
3767     }
3768 
3769     ret = kvm_vm_ioctl(s, KVM_CREATE_DEVICE, &create_dev);
3770     if (ret) {
3771         return ret;
3772     }
3773 
3774     return test ? 0 : create_dev.fd;
3775 }
3776 
3777 bool kvm_device_supported(int vmfd, uint64_t type)
3778 {
3779     struct kvm_create_device create_dev = {
3780         .type = type,
3781         .fd = -1,
3782         .flags = KVM_CREATE_DEVICE_TEST,
3783     };
3784 
3785     if (ioctl(vmfd, KVM_CHECK_EXTENSION, KVM_CAP_DEVICE_CTRL) <= 0) {
3786         return false;
3787     }
3788 
3789     return (ioctl(vmfd, KVM_CREATE_DEVICE, &create_dev) >= 0);
3790 }
3791 
3792 int kvm_set_one_reg(CPUState *cs, uint64_t id, void *source)
3793 {
3794     struct kvm_one_reg reg;
3795     int r;
3796 
3797     reg.id = id;
3798     reg.addr = (uintptr_t) source;
3799     r = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
3800     if (r) {
3801         trace_kvm_failed_reg_set(id, strerror(-r));
3802     }
3803     return r;
3804 }
3805 
3806 int kvm_get_one_reg(CPUState *cs, uint64_t id, void *target)
3807 {
3808     struct kvm_one_reg reg;
3809     int r;
3810 
3811     reg.id = id;
3812     reg.addr = (uintptr_t) target;
3813     r = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
3814     if (r) {
3815         trace_kvm_failed_reg_get(id, strerror(-r));
3816     }
3817     return r;
3818 }
3819 
3820 static bool kvm_accel_has_memory(AccelState *accel, AddressSpace *as,
3821                                  hwaddr start_addr, hwaddr size)
3822 {
3823     KVMState *kvm = KVM_STATE(accel);
3824     int i;
3825 
3826     for (i = 0; i < kvm->nr_as; ++i) {
3827         if (kvm->as[i].as == as && kvm->as[i].ml) {
3828             size = MIN(kvm_max_slot_size, size);
3829             return NULL != kvm_lookup_matching_slot(kvm->as[i].ml,
3830                                                     start_addr, size);
3831         }
3832     }
3833 
3834     return false;
3835 }
3836 
3837 static void kvm_get_kvm_shadow_mem(Object *obj, Visitor *v,
3838                                    const char *name, void *opaque,
3839                                    Error **errp)
3840 {
3841     KVMState *s = KVM_STATE(obj);
3842     int64_t value = s->kvm_shadow_mem;
3843 
3844     visit_type_int(v, name, &value, errp);
3845 }
3846 
3847 static void kvm_set_kvm_shadow_mem(Object *obj, Visitor *v,
3848                                    const char *name, void *opaque,
3849                                    Error **errp)
3850 {
3851     KVMState *s = KVM_STATE(obj);
3852     int64_t value;
3853 
3854     if (s->fd != -1) {
3855         error_setg(errp, "Cannot set properties after the accelerator has been initialized");
3856         return;
3857     }
3858 
3859     if (!visit_type_int(v, name, &value, errp)) {
3860         return;
3861     }
3862 
3863     s->kvm_shadow_mem = value;
3864 }
3865 
3866 static void kvm_set_kernel_irqchip(Object *obj, Visitor *v,
3867                                    const char *name, void *opaque,
3868                                    Error **errp)
3869 {
3870     KVMState *s = KVM_STATE(obj);
3871     OnOffSplit mode;
3872 
3873     if (s->fd != -1) {
3874         error_setg(errp, "Cannot set properties after the accelerator has been initialized");
3875         return;
3876     }
3877 
3878     if (!visit_type_OnOffSplit(v, name, &mode, errp)) {
3879         return;
3880     }
3881     switch (mode) {
3882     case ON_OFF_SPLIT_ON:
3883         s->kernel_irqchip_allowed = true;
3884         s->kernel_irqchip_required = true;
3885         s->kernel_irqchip_split = ON_OFF_AUTO_OFF;
3886         break;
3887     case ON_OFF_SPLIT_OFF:
3888         s->kernel_irqchip_allowed = false;
3889         s->kernel_irqchip_required = false;
3890         s->kernel_irqchip_split = ON_OFF_AUTO_OFF;
3891         break;
3892     case ON_OFF_SPLIT_SPLIT:
3893         s->kernel_irqchip_allowed = true;
3894         s->kernel_irqchip_required = true;
3895         s->kernel_irqchip_split = ON_OFF_AUTO_ON;
3896         break;
3897     default:
3898         /* The value was checked in visit_type_OnOffSplit() above. If
3899          * we get here, then something is wrong in QEMU.
3900          */
3901         abort();
3902     }
3903 }
3904 
3905 bool kvm_kernel_irqchip_allowed(void)
3906 {
3907     return kvm_state->kernel_irqchip_allowed;
3908 }
3909 
3910 bool kvm_kernel_irqchip_required(void)
3911 {
3912     return kvm_state->kernel_irqchip_required;
3913 }
3914 
3915 bool kvm_kernel_irqchip_split(void)
3916 {
3917     return kvm_state->kernel_irqchip_split == ON_OFF_AUTO_ON;
3918 }
3919 
3920 static void kvm_get_dirty_ring_size(Object *obj, Visitor *v,
3921                                     const char *name, void *opaque,
3922                                     Error **errp)
3923 {
3924     KVMState *s = KVM_STATE(obj);
3925     uint32_t value = s->kvm_dirty_ring_size;
3926 
3927     visit_type_uint32(v, name, &value, errp);
3928 }
3929 
3930 static void kvm_set_dirty_ring_size(Object *obj, Visitor *v,
3931                                     const char *name, void *opaque,
3932                                     Error **errp)
3933 {
3934     KVMState *s = KVM_STATE(obj);
3935     uint32_t value;
3936 
3937     if (s->fd != -1) {
3938         error_setg(errp, "Cannot set properties after the accelerator has been initialized");
3939         return;
3940     }
3941 
3942     if (!visit_type_uint32(v, name, &value, errp)) {
3943         return;
3944     }
3945     if (value & (value - 1)) {
3946         error_setg(errp, "dirty-ring-size must be a power of two.");
3947         return;
3948     }
3949 
3950     s->kvm_dirty_ring_size = value;
3951 }
3952 
3953 static char *kvm_get_device(Object *obj,
3954                             Error **errp G_GNUC_UNUSED)
3955 {
3956     KVMState *s = KVM_STATE(obj);
3957 
3958     return g_strdup(s->device);
3959 }
3960 
3961 static void kvm_set_device(Object *obj,
3962                            const char *value,
3963                            Error **errp G_GNUC_UNUSED)
3964 {
3965     KVMState *s = KVM_STATE(obj);
3966 
3967     g_free(s->device);
3968     s->device = g_strdup(value);
3969 }
3970 
3971 static void kvm_set_kvm_rapl(Object *obj, bool value, Error **errp)
3972 {
3973     KVMState *s = KVM_STATE(obj);
3974     s->msr_energy.enable = value;
3975 }
3976 
3977 static void kvm_set_kvm_rapl_socket_path(Object *obj,
3978                                          const char *str,
3979                                          Error **errp)
3980 {
3981     KVMState *s = KVM_STATE(obj);
3982     g_free(s->msr_energy.socket_path);
3983     s->msr_energy.socket_path = g_strdup(str);
3984 }
3985 
3986 static void kvm_accel_instance_init(Object *obj)
3987 {
3988     KVMState *s = KVM_STATE(obj);
3989 
3990     s->fd = -1;
3991     s->vmfd = -1;
3992     s->kvm_shadow_mem = -1;
3993     s->kernel_irqchip_allowed = true;
3994     s->kernel_irqchip_split = ON_OFF_AUTO_AUTO;
3995     /* KVM dirty ring is by default off */
3996     s->kvm_dirty_ring_size = 0;
3997     s->kvm_dirty_ring_with_bitmap = false;
3998     s->kvm_eager_split_size = 0;
3999     s->notify_vmexit = NOTIFY_VMEXIT_OPTION_RUN;
4000     s->notify_window = 0;
4001     s->xen_version = 0;
4002     s->xen_gnttab_max_frames = 64;
4003     s->xen_evtchn_max_pirq = 256;
4004     s->device = NULL;
4005     s->msr_energy.enable = false;
4006 }
4007 
4008 /**
4009  * kvm_gdbstub_sstep_flags():
4010  *
4011  * Returns: SSTEP_* flags that KVM supports for guest debug. The
4012  * support is probed during kvm_init()
4013  */
4014 static int kvm_gdbstub_sstep_flags(AccelState *as)
4015 {
4016     return kvm_sstep_flags;
4017 }
4018 
4019 static void kvm_accel_class_init(ObjectClass *oc, const void *data)
4020 {
4021     AccelClass *ac = ACCEL_CLASS(oc);
4022     ac->name = "KVM";
4023     ac->init_machine = kvm_init;
4024     ac->has_memory = kvm_accel_has_memory;
4025     ac->allowed = &kvm_allowed;
4026     ac->gdbstub_supported_sstep_flags = kvm_gdbstub_sstep_flags;
4027 
4028     object_class_property_add(oc, "kernel-irqchip", "on|off|split",
4029         NULL, kvm_set_kernel_irqchip,
4030         NULL, NULL);
4031     object_class_property_set_description(oc, "kernel-irqchip",
4032         "Configure KVM in-kernel irqchip");
4033 
4034     object_class_property_add(oc, "kvm-shadow-mem", "int",
4035         kvm_get_kvm_shadow_mem, kvm_set_kvm_shadow_mem,
4036         NULL, NULL);
4037     object_class_property_set_description(oc, "kvm-shadow-mem",
4038         "KVM shadow MMU size");
4039 
4040     object_class_property_add(oc, "dirty-ring-size", "uint32",
4041         kvm_get_dirty_ring_size, kvm_set_dirty_ring_size,
4042         NULL, NULL);
4043     object_class_property_set_description(oc, "dirty-ring-size",
4044         "Size of KVM dirty page ring buffer (default: 0, i.e. use bitmap)");
4045 
4046     object_class_property_add_str(oc, "device", kvm_get_device, kvm_set_device);
4047     object_class_property_set_description(oc, "device",
4048         "Path to the device node to use (default: /dev/kvm)");
4049 
4050     object_class_property_add_bool(oc, "rapl",
4051                                    NULL,
4052                                    kvm_set_kvm_rapl);
4053     object_class_property_set_description(oc, "rapl",
4054         "Allow energy related MSRs for RAPL interface in Guest");
4055 
4056     object_class_property_add_str(oc, "rapl-helper-socket", NULL,
4057                                   kvm_set_kvm_rapl_socket_path);
4058     object_class_property_set_description(oc, "rapl-helper-socket",
4059         "Socket Path for comminucating with the Virtual MSR helper daemon");
4060 
4061     kvm_arch_accel_class_init(oc);
4062 }
4063 
4064 static const TypeInfo kvm_accel_type = {
4065     .name = TYPE_KVM_ACCEL,
4066     .parent = TYPE_ACCEL,
4067     .instance_init = kvm_accel_instance_init,
4068     .class_init = kvm_accel_class_init,
4069     .instance_size = sizeof(KVMState),
4070 };
4071 
4072 static void kvm_type_init(void)
4073 {
4074     type_register_static(&kvm_accel_type);
4075 }
4076 
4077 type_init(kvm_type_init);
4078 
4079 typedef struct StatsArgs {
4080     union StatsResultsType {
4081         StatsResultList **stats;
4082         StatsSchemaList **schema;
4083     } result;
4084     strList *names;
4085     Error **errp;
4086 } StatsArgs;
4087 
4088 static StatsList *add_kvmstat_entry(struct kvm_stats_desc *pdesc,
4089                                     uint64_t *stats_data,
4090                                     StatsList *stats_list,
4091                                     Error **errp)
4092 {
4093 
4094     Stats *stats;
4095     uint64List *val_list = NULL;
4096 
4097     /* Only add stats that we understand.  */
4098     switch (pdesc->flags & KVM_STATS_TYPE_MASK) {
4099     case KVM_STATS_TYPE_CUMULATIVE:
4100     case KVM_STATS_TYPE_INSTANT:
4101     case KVM_STATS_TYPE_PEAK:
4102     case KVM_STATS_TYPE_LINEAR_HIST:
4103     case KVM_STATS_TYPE_LOG_HIST:
4104         break;
4105     default:
4106         return stats_list;
4107     }
4108 
4109     switch (pdesc->flags & KVM_STATS_UNIT_MASK) {
4110     case KVM_STATS_UNIT_NONE:
4111     case KVM_STATS_UNIT_BYTES:
4112     case KVM_STATS_UNIT_CYCLES:
4113     case KVM_STATS_UNIT_SECONDS:
4114     case KVM_STATS_UNIT_BOOLEAN:
4115         break;
4116     default:
4117         return stats_list;
4118     }
4119 
4120     switch (pdesc->flags & KVM_STATS_BASE_MASK) {
4121     case KVM_STATS_BASE_POW10:
4122     case KVM_STATS_BASE_POW2:
4123         break;
4124     default:
4125         return stats_list;
4126     }
4127 
4128     /* Alloc and populate data list */
4129     stats = g_new0(Stats, 1);
4130     stats->name = g_strdup(pdesc->name);
4131     stats->value = g_new0(StatsValue, 1);
4132 
4133     if ((pdesc->flags & KVM_STATS_UNIT_MASK) == KVM_STATS_UNIT_BOOLEAN) {
4134         stats->value->u.boolean = *stats_data;
4135         stats->value->type = QTYPE_QBOOL;
4136     } else if (pdesc->size == 1) {
4137         stats->value->u.scalar = *stats_data;
4138         stats->value->type = QTYPE_QNUM;
4139     } else {
4140         int i;
4141         for (i = 0; i < pdesc->size; i++) {
4142             QAPI_LIST_PREPEND(val_list, stats_data[i]);
4143         }
4144         stats->value->u.list = val_list;
4145         stats->value->type = QTYPE_QLIST;
4146     }
4147 
4148     QAPI_LIST_PREPEND(stats_list, stats);
4149     return stats_list;
4150 }
4151 
4152 static StatsSchemaValueList *add_kvmschema_entry(struct kvm_stats_desc *pdesc,
4153                                                  StatsSchemaValueList *list,
4154                                                  Error **errp)
4155 {
4156     StatsSchemaValueList *schema_entry = g_new0(StatsSchemaValueList, 1);
4157     schema_entry->value = g_new0(StatsSchemaValue, 1);
4158 
4159     switch (pdesc->flags & KVM_STATS_TYPE_MASK) {
4160     case KVM_STATS_TYPE_CUMULATIVE:
4161         schema_entry->value->type = STATS_TYPE_CUMULATIVE;
4162         break;
4163     case KVM_STATS_TYPE_INSTANT:
4164         schema_entry->value->type = STATS_TYPE_INSTANT;
4165         break;
4166     case KVM_STATS_TYPE_PEAK:
4167         schema_entry->value->type = STATS_TYPE_PEAK;
4168         break;
4169     case KVM_STATS_TYPE_LINEAR_HIST:
4170         schema_entry->value->type = STATS_TYPE_LINEAR_HISTOGRAM;
4171         schema_entry->value->bucket_size = pdesc->bucket_size;
4172         schema_entry->value->has_bucket_size = true;
4173         break;
4174     case KVM_STATS_TYPE_LOG_HIST:
4175         schema_entry->value->type = STATS_TYPE_LOG2_HISTOGRAM;
4176         break;
4177     default:
4178         goto exit;
4179     }
4180 
4181     switch (pdesc->flags & KVM_STATS_UNIT_MASK) {
4182     case KVM_STATS_UNIT_NONE:
4183         break;
4184     case KVM_STATS_UNIT_BOOLEAN:
4185         schema_entry->value->has_unit = true;
4186         schema_entry->value->unit = STATS_UNIT_BOOLEAN;
4187         break;
4188     case KVM_STATS_UNIT_BYTES:
4189         schema_entry->value->has_unit = true;
4190         schema_entry->value->unit = STATS_UNIT_BYTES;
4191         break;
4192     case KVM_STATS_UNIT_CYCLES:
4193         schema_entry->value->has_unit = true;
4194         schema_entry->value->unit = STATS_UNIT_CYCLES;
4195         break;
4196     case KVM_STATS_UNIT_SECONDS:
4197         schema_entry->value->has_unit = true;
4198         schema_entry->value->unit = STATS_UNIT_SECONDS;
4199         break;
4200     default:
4201         goto exit;
4202     }
4203 
4204     schema_entry->value->exponent = pdesc->exponent;
4205     if (pdesc->exponent) {
4206         switch (pdesc->flags & KVM_STATS_BASE_MASK) {
4207         case KVM_STATS_BASE_POW10:
4208             schema_entry->value->has_base = true;
4209             schema_entry->value->base = 10;
4210             break;
4211         case KVM_STATS_BASE_POW2:
4212             schema_entry->value->has_base = true;
4213             schema_entry->value->base = 2;
4214             break;
4215         default:
4216             goto exit;
4217         }
4218     }
4219 
4220     schema_entry->value->name = g_strdup(pdesc->name);
4221     schema_entry->next = list;
4222     return schema_entry;
4223 exit:
4224     g_free(schema_entry->value);
4225     g_free(schema_entry);
4226     return list;
4227 }
4228 
4229 /* Cached stats descriptors */
4230 typedef struct StatsDescriptors {
4231     const char *ident; /* cache key, currently the StatsTarget */
4232     struct kvm_stats_desc *kvm_stats_desc;
4233     struct kvm_stats_header kvm_stats_header;
4234     QTAILQ_ENTRY(StatsDescriptors) next;
4235 } StatsDescriptors;
4236 
4237 static QTAILQ_HEAD(, StatsDescriptors) stats_descriptors =
4238     QTAILQ_HEAD_INITIALIZER(stats_descriptors);
4239 
4240 /*
4241  * Return the descriptors for 'target', that either have already been read
4242  * or are retrieved from 'stats_fd'.
4243  */
4244 static StatsDescriptors *find_stats_descriptors(StatsTarget target, int stats_fd,
4245                                                 Error **errp)
4246 {
4247     StatsDescriptors *descriptors;
4248     const char *ident;
4249     struct kvm_stats_desc *kvm_stats_desc;
4250     struct kvm_stats_header *kvm_stats_header;
4251     size_t size_desc;
4252     ssize_t ret;
4253 
4254     ident = StatsTarget_str(target);
4255     QTAILQ_FOREACH(descriptors, &stats_descriptors, next) {
4256         if (g_str_equal(descriptors->ident, ident)) {
4257             return descriptors;
4258         }
4259     }
4260 
4261     descriptors = g_new0(StatsDescriptors, 1);
4262 
4263     /* Read stats header */
4264     kvm_stats_header = &descriptors->kvm_stats_header;
4265     ret = pread(stats_fd, kvm_stats_header, sizeof(*kvm_stats_header), 0);
4266     if (ret != sizeof(*kvm_stats_header)) {
4267         error_setg(errp, "KVM stats: failed to read stats header: "
4268                    "expected %zu actual %zu",
4269                    sizeof(*kvm_stats_header), ret);
4270         g_free(descriptors);
4271         return NULL;
4272     }
4273     size_desc = sizeof(*kvm_stats_desc) + kvm_stats_header->name_size;
4274 
4275     /* Read stats descriptors */
4276     kvm_stats_desc = g_malloc0_n(kvm_stats_header->num_desc, size_desc);
4277     ret = pread(stats_fd, kvm_stats_desc,
4278                 size_desc * kvm_stats_header->num_desc,
4279                 kvm_stats_header->desc_offset);
4280 
4281     if (ret != size_desc * kvm_stats_header->num_desc) {
4282         error_setg(errp, "KVM stats: failed to read stats descriptors: "
4283                    "expected %zu actual %zu",
4284                    size_desc * kvm_stats_header->num_desc, ret);
4285         g_free(descriptors);
4286         g_free(kvm_stats_desc);
4287         return NULL;
4288     }
4289     descriptors->kvm_stats_desc = kvm_stats_desc;
4290     descriptors->ident = ident;
4291     QTAILQ_INSERT_TAIL(&stats_descriptors, descriptors, next);
4292     return descriptors;
4293 }
4294 
4295 static void query_stats(StatsResultList **result, StatsTarget target,
4296                         strList *names, int stats_fd, CPUState *cpu,
4297                         Error **errp)
4298 {
4299     struct kvm_stats_desc *kvm_stats_desc;
4300     struct kvm_stats_header *kvm_stats_header;
4301     StatsDescriptors *descriptors;
4302     g_autofree uint64_t *stats_data = NULL;
4303     struct kvm_stats_desc *pdesc;
4304     StatsList *stats_list = NULL;
4305     size_t size_desc, size_data = 0;
4306     ssize_t ret;
4307     int i;
4308 
4309     descriptors = find_stats_descriptors(target, stats_fd, errp);
4310     if (!descriptors) {
4311         return;
4312     }
4313 
4314     kvm_stats_header = &descriptors->kvm_stats_header;
4315     kvm_stats_desc = descriptors->kvm_stats_desc;
4316     size_desc = sizeof(*kvm_stats_desc) + kvm_stats_header->name_size;
4317 
4318     /* Tally the total data size; read schema data */
4319     for (i = 0; i < kvm_stats_header->num_desc; ++i) {
4320         pdesc = (void *)kvm_stats_desc + i * size_desc;
4321         size_data += pdesc->size * sizeof(*stats_data);
4322     }
4323 
4324     stats_data = g_malloc0(size_data);
4325     ret = pread(stats_fd, stats_data, size_data, kvm_stats_header->data_offset);
4326 
4327     if (ret != size_data) {
4328         error_setg(errp, "KVM stats: failed to read data: "
4329                    "expected %zu actual %zu", size_data, ret);
4330         return;
4331     }
4332 
4333     for (i = 0; i < kvm_stats_header->num_desc; ++i) {
4334         uint64_t *stats;
4335         pdesc = (void *)kvm_stats_desc + i * size_desc;
4336 
4337         /* Add entry to the list */
4338         stats = (void *)stats_data + pdesc->offset;
4339         if (!apply_str_list_filter(pdesc->name, names)) {
4340             continue;
4341         }
4342         stats_list = add_kvmstat_entry(pdesc, stats, stats_list, errp);
4343     }
4344 
4345     if (!stats_list) {
4346         return;
4347     }
4348 
4349     switch (target) {
4350     case STATS_TARGET_VM:
4351         add_stats_entry(result, STATS_PROVIDER_KVM, NULL, stats_list);
4352         break;
4353     case STATS_TARGET_VCPU:
4354         add_stats_entry(result, STATS_PROVIDER_KVM,
4355                         cpu->parent_obj.canonical_path,
4356                         stats_list);
4357         break;
4358     default:
4359         g_assert_not_reached();
4360     }
4361 }
4362 
4363 static void query_stats_schema(StatsSchemaList **result, StatsTarget target,
4364                                int stats_fd, Error **errp)
4365 {
4366     struct kvm_stats_desc *kvm_stats_desc;
4367     struct kvm_stats_header *kvm_stats_header;
4368     StatsDescriptors *descriptors;
4369     struct kvm_stats_desc *pdesc;
4370     StatsSchemaValueList *stats_list = NULL;
4371     size_t size_desc;
4372     int i;
4373 
4374     descriptors = find_stats_descriptors(target, stats_fd, errp);
4375     if (!descriptors) {
4376         return;
4377     }
4378 
4379     kvm_stats_header = &descriptors->kvm_stats_header;
4380     kvm_stats_desc = descriptors->kvm_stats_desc;
4381     size_desc = sizeof(*kvm_stats_desc) + kvm_stats_header->name_size;
4382 
4383     /* Tally the total data size; read schema data */
4384     for (i = 0; i < kvm_stats_header->num_desc; ++i) {
4385         pdesc = (void *)kvm_stats_desc + i * size_desc;
4386         stats_list = add_kvmschema_entry(pdesc, stats_list, errp);
4387     }
4388 
4389     add_stats_schema(result, STATS_PROVIDER_KVM, target, stats_list);
4390 }
4391 
4392 static void query_stats_vcpu(CPUState *cpu, StatsArgs *kvm_stats_args)
4393 {
4394     int stats_fd = cpu->kvm_vcpu_stats_fd;
4395     Error *local_err = NULL;
4396 
4397     if (stats_fd == -1) {
4398         error_setg_errno(&local_err, errno, "KVM stats: ioctl failed");
4399         error_propagate(kvm_stats_args->errp, local_err);
4400         return;
4401     }
4402     query_stats(kvm_stats_args->result.stats, STATS_TARGET_VCPU,
4403                 kvm_stats_args->names, stats_fd, cpu,
4404                 kvm_stats_args->errp);
4405 }
4406 
4407 static void query_stats_schema_vcpu(CPUState *cpu, StatsArgs *kvm_stats_args)
4408 {
4409     int stats_fd = cpu->kvm_vcpu_stats_fd;
4410     Error *local_err = NULL;
4411 
4412     if (stats_fd == -1) {
4413         error_setg_errno(&local_err, errno, "KVM stats: ioctl failed");
4414         error_propagate(kvm_stats_args->errp, local_err);
4415         return;
4416     }
4417     query_stats_schema(kvm_stats_args->result.schema, STATS_TARGET_VCPU, stats_fd,
4418                        kvm_stats_args->errp);
4419 }
4420 
4421 static void query_stats_cb(StatsResultList **result, StatsTarget target,
4422                            strList *names, strList *targets, Error **errp)
4423 {
4424     KVMState *s = kvm_state;
4425     CPUState *cpu;
4426     int stats_fd;
4427 
4428     switch (target) {
4429     case STATS_TARGET_VM:
4430     {
4431         stats_fd = kvm_vm_ioctl(s, KVM_GET_STATS_FD, NULL);
4432         if (stats_fd == -1) {
4433             error_setg_errno(errp, errno, "KVM stats: ioctl failed");
4434             return;
4435         }
4436         query_stats(result, target, names, stats_fd, NULL, errp);
4437         close(stats_fd);
4438         break;
4439     }
4440     case STATS_TARGET_VCPU:
4441     {
4442         StatsArgs stats_args;
4443         stats_args.result.stats = result;
4444         stats_args.names = names;
4445         stats_args.errp = errp;
4446         CPU_FOREACH(cpu) {
4447             if (!apply_str_list_filter(cpu->parent_obj.canonical_path, targets)) {
4448                 continue;
4449             }
4450             query_stats_vcpu(cpu, &stats_args);
4451         }
4452         break;
4453     }
4454     default:
4455         break;
4456     }
4457 }
4458 
4459 void query_stats_schemas_cb(StatsSchemaList **result, Error **errp)
4460 {
4461     StatsArgs stats_args;
4462     KVMState *s = kvm_state;
4463     int stats_fd;
4464 
4465     stats_fd = kvm_vm_ioctl(s, KVM_GET_STATS_FD, NULL);
4466     if (stats_fd == -1) {
4467         error_setg_errno(errp, errno, "KVM stats: ioctl failed");
4468         return;
4469     }
4470     query_stats_schema(result, STATS_TARGET_VM, stats_fd, errp);
4471     close(stats_fd);
4472 
4473     if (first_cpu) {
4474         stats_args.result.schema = result;
4475         stats_args.errp = errp;
4476         query_stats_schema_vcpu(first_cpu, &stats_args);
4477     }
4478 }
4479 
4480 void kvm_mark_guest_state_protected(void)
4481 {
4482     kvm_state->guest_state_protected = true;
4483 }
4484 
4485 int kvm_create_guest_memfd(uint64_t size, uint64_t flags, Error **errp)
4486 {
4487     int fd;
4488     struct kvm_create_guest_memfd guest_memfd = {
4489         .size = size,
4490         .flags = flags,
4491     };
4492 
4493     if (!kvm_guest_memfd_supported) {
4494         error_setg(errp, "KVM does not support guest_memfd");
4495         return -1;
4496     }
4497 
4498     fd = kvm_vm_ioctl(kvm_state, KVM_CREATE_GUEST_MEMFD, &guest_memfd);
4499     if (fd < 0) {
4500         error_setg_errno(errp, errno, "Error creating KVM guest_memfd");
4501         return -1;
4502     }
4503 
4504     return fd;
4505 }
4506