xref: /openbmc/qemu/accel/kvm/kvm-all.c (revision b14df228)
1 /*
2  * QEMU KVM support
3  *
4  * Copyright IBM, Corp. 2008
5  *           Red Hat, Inc. 2008
6  *
7  * Authors:
8  *  Anthony Liguori   <aliguori@us.ibm.com>
9  *  Glauber Costa     <gcosta@redhat.com>
10  *
11  * This work is licensed under the terms of the GNU GPL, version 2 or later.
12  * See the COPYING file in the top-level directory.
13  *
14  */
15 
16 #include "qemu/osdep.h"
17 #include <sys/ioctl.h>
18 #include <poll.h>
19 
20 #include <linux/kvm.h>
21 
22 #include "qemu/atomic.h"
23 #include "qemu/option.h"
24 #include "qemu/config-file.h"
25 #include "qemu/error-report.h"
26 #include "qapi/error.h"
27 #include "hw/pci/msi.h"
28 #include "hw/pci/msix.h"
29 #include "hw/s390x/adapter.h"
30 #include "exec/gdbstub.h"
31 #include "sysemu/kvm_int.h"
32 #include "sysemu/runstate.h"
33 #include "sysemu/cpus.h"
34 #include "qemu/bswap.h"
35 #include "exec/memory.h"
36 #include "exec/ram_addr.h"
37 #include "qemu/event_notifier.h"
38 #include "qemu/main-loop.h"
39 #include "trace.h"
40 #include "hw/irq.h"
41 #include "qapi/visitor.h"
42 #include "qapi/qapi-types-common.h"
43 #include "qapi/qapi-visit-common.h"
44 #include "sysemu/reset.h"
45 #include "qemu/guest-random.h"
46 #include "sysemu/hw_accel.h"
47 #include "kvm-cpus.h"
48 #include "sysemu/dirtylimit.h"
49 
50 #include "hw/boards.h"
51 #include "monitor/stats.h"
52 
53 /* This check must be after config-host.h is included */
54 #ifdef CONFIG_EVENTFD
55 #include <sys/eventfd.h>
56 #endif
57 
58 /* KVM uses PAGE_SIZE in its definition of KVM_COALESCED_MMIO_MAX. We
59  * need to use the real host PAGE_SIZE, as that's what KVM will use.
60  */
61 #ifdef PAGE_SIZE
62 #undef PAGE_SIZE
63 #endif
64 #define PAGE_SIZE qemu_real_host_page_size()
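/*
 * Note: every use of PAGE_SIZE below therefore expands to a runtime call
 * returning the host's page size (typically 4 KiB on x86-64, but e.g.
 * 64 KiB on some aarch64/ppc64 hosts).
 */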
65 
66 #ifndef KVM_GUESTDBG_BLOCKIRQ
67 #define KVM_GUESTDBG_BLOCKIRQ 0
68 #endif
69 
70 //#define DEBUG_KVM
71 
72 #ifdef DEBUG_KVM
73 #define DPRINTF(fmt, ...) \
74     do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
75 #else
76 #define DPRINTF(fmt, ...) \
77     do { } while (0)
78 #endif
79 
80 #define KVM_MSI_HASHTAB_SIZE    256
81 
82 struct KVMParkedVcpu {
83     unsigned long vcpu_id;
84     int kvm_fd;
85     QLIST_ENTRY(KVMParkedVcpu) node;
86 };
87 
88 enum KVMDirtyRingReaperState {
89     KVM_DIRTY_RING_REAPER_NONE = 0,
90     /* The reaper is sleeping */
91     KVM_DIRTY_RING_REAPER_WAIT,
92     /* The reaper is collecting dirty pages */
93     KVM_DIRTY_RING_REAPER_REAPING,
94 };
95 
96 /*
97  * KVM reaper instance, responsible for collecting the KVM dirty bits
98  * via the dirty ring.
99  */
100 struct KVMDirtyRingReaper {
101     /* The reaper thread */
102     QemuThread reaper_thr;
103     volatile uint64_t reaper_iteration; /* iteration number of reaper thr */
104     volatile enum KVMDirtyRingReaperState reaper_state; /* reap thr state */
105 };
106 
107 struct KVMState
108 {
109     AccelState parent_obj;
110 
111     int nr_slots;
112     int fd;
113     int vmfd;
114     int coalesced_mmio;
115     int coalesced_pio;
116     struct kvm_coalesced_mmio_ring *coalesced_mmio_ring;
117     bool coalesced_flush_in_progress;
118     int vcpu_events;
119     int robust_singlestep;
120     int debugregs;
121 #ifdef KVM_CAP_SET_GUEST_DEBUG
122     QTAILQ_HEAD(, kvm_sw_breakpoint) kvm_sw_breakpoints;
123 #endif
124     int max_nested_state_len;
125     int many_ioeventfds;
126     int intx_set_mask;
127     int kvm_shadow_mem;
128     bool kernel_irqchip_allowed;
129     bool kernel_irqchip_required;
130     OnOffAuto kernel_irqchip_split;
131     bool sync_mmu;
132     uint64_t manual_dirty_log_protect;
133     /* The man page (and posix) say ioctl numbers are signed int, but
134      * they're not.  Linux, glibc and *BSD all treat ioctl numbers as
135      * unsigned, and treating them as signed here can break things */
136     unsigned irq_set_ioctl;
137     unsigned int sigmask_len;
138     GHashTable *gsimap;
139 #ifdef KVM_CAP_IRQ_ROUTING
140     struct kvm_irq_routing *irq_routes;
141     int nr_allocated_irq_routes;
142     unsigned long *used_gsi_bitmap;
143     unsigned int gsi_count;
144     QTAILQ_HEAD(, KVMMSIRoute) msi_hashtab[KVM_MSI_HASHTAB_SIZE];
145 #endif
146     KVMMemoryListener memory_listener;
147     QLIST_HEAD(, KVMParkedVcpu) kvm_parked_vcpus;
148 
149     /* For "info mtree -f" to tell if an MR is registered in KVM */
150     int nr_as;
151     struct KVMAs {
152         KVMMemoryListener *ml;
153         AddressSpace *as;
154     } *as;
155     uint64_t kvm_dirty_ring_bytes;  /* Size of the per-vcpu dirty ring */
156     uint32_t kvm_dirty_ring_size;   /* Number of dirty GFNs per ring */
157     struct KVMDirtyRingReaper reaper;
158 };
159 
160 KVMState *kvm_state;
161 bool kvm_kernel_irqchip;
162 bool kvm_split_irqchip;
163 bool kvm_async_interrupts_allowed;
164 bool kvm_halt_in_kernel_allowed;
165 bool kvm_eventfds_allowed;
166 bool kvm_irqfds_allowed;
167 bool kvm_resamplefds_allowed;
168 bool kvm_msi_via_irqfd_allowed;
169 bool kvm_gsi_routing_allowed;
170 bool kvm_gsi_direct_mapping;
171 bool kvm_allowed;
172 bool kvm_readonly_mem_allowed;
173 bool kvm_vm_attributes_allowed;
174 bool kvm_direct_msi_allowed;
175 bool kvm_ioeventfd_any_length_allowed;
176 bool kvm_msi_use_devid;
177 bool kvm_has_guest_debug;
178 int kvm_sstep_flags;
179 static bool kvm_immediate_exit;
180 static hwaddr kvm_max_slot_size = ~0;
181 
182 static const KVMCapabilityInfo kvm_required_capabilites[] = {
183     KVM_CAP_INFO(USER_MEMORY),
184     KVM_CAP_INFO(DESTROY_MEMORY_REGION_WORKS),
185     KVM_CAP_INFO(JOIN_MEMORY_REGIONS_WORKS),
186     KVM_CAP_LAST_INFO
187 };
188 
189 static NotifierList kvm_irqchip_change_notifiers =
190     NOTIFIER_LIST_INITIALIZER(kvm_irqchip_change_notifiers);
191 
192 struct KVMResampleFd {
193     int gsi;
194     EventNotifier *resample_event;
195     QLIST_ENTRY(KVMResampleFd) node;
196 };
197 typedef struct KVMResampleFd KVMResampleFd;
198 
199 /*
200  * Only used with split irqchip where we need to do the resample fd
201  * kick for the kernel from userspace.
202  */
203 static QLIST_HEAD(, KVMResampleFd) kvm_resample_fd_list =
204     QLIST_HEAD_INITIALIZER(kvm_resample_fd_list);
205 
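/* A single global mutex protects the slot arrays of all KVM memory listeners */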
206 static QemuMutex kml_slots_lock;
207 
208 #define kvm_slots_lock()    qemu_mutex_lock(&kml_slots_lock)
209 #define kvm_slots_unlock()  qemu_mutex_unlock(&kml_slots_lock)
210 
211 static void kvm_slot_init_dirty_bitmap(KVMSlot *mem);
212 
213 static inline void kvm_resample_fd_remove(int gsi)
214 {
215     KVMResampleFd *rfd;
216 
217     QLIST_FOREACH(rfd, &kvm_resample_fd_list, node) {
218         if (rfd->gsi == gsi) {
219             QLIST_REMOVE(rfd, node);
220             g_free(rfd);
221             break;
222         }
223     }
224 }
225 
226 static inline void kvm_resample_fd_insert(int gsi, EventNotifier *event)
227 {
228     KVMResampleFd *rfd = g_new0(KVMResampleFd, 1);
229 
230     rfd->gsi = gsi;
231     rfd->resample_event = event;
232 
233     QLIST_INSERT_HEAD(&kvm_resample_fd_list, rfd, node);
234 }
235 
236 void kvm_resample_fd_notify(int gsi)
237 {
238     KVMResampleFd *rfd;
239 
240     QLIST_FOREACH(rfd, &kvm_resample_fd_list, node) {
241         if (rfd->gsi == gsi) {
242             event_notifier_set(rfd->resample_event);
243             trace_kvm_resample_fd_notify(gsi);
244             return;
245         }
246     }
247 }
248 
249 int kvm_get_max_memslots(void)
250 {
251     KVMState *s = KVM_STATE(current_accel());
252 
253     return s->nr_slots;
254 }
255 
256 /* Called with KVMMemoryListener.slots_lock held */
257 static KVMSlot *kvm_get_free_slot(KVMMemoryListener *kml)
258 {
259     KVMState *s = kvm_state;
260     int i;
261 
262     for (i = 0; i < s->nr_slots; i++) {
263         if (kml->slots[i].memory_size == 0) {
264             return &kml->slots[i];
265         }
266     }
267 
268     return NULL;
269 }
270 
271 bool kvm_has_free_slot(MachineState *ms)
272 {
273     KVMState *s = KVM_STATE(ms->accelerator);
274     bool result;
275     KVMMemoryListener *kml = &s->memory_listener;
276 
277     kvm_slots_lock();
278     result = !!kvm_get_free_slot(kml);
279     kvm_slots_unlock();
280 
281     return result;
282 }
283 
284 /* Called with KVMMemoryListener.slots_lock held */
285 static KVMSlot *kvm_alloc_slot(KVMMemoryListener *kml)
286 {
287     KVMSlot *slot = kvm_get_free_slot(kml);
288 
289     if (slot) {
290         return slot;
291     }
292 
293     fprintf(stderr, "%s: no free slot available\n", __func__);
294     abort();
295 }
296 
297 static KVMSlot *kvm_lookup_matching_slot(KVMMemoryListener *kml,
298                                          hwaddr start_addr,
299                                          hwaddr size)
300 {
301     KVMState *s = kvm_state;
302     int i;
303 
304     for (i = 0; i < s->nr_slots; i++) {
305         KVMSlot *mem = &kml->slots[i];
306 
307         if (start_addr == mem->start_addr && size == mem->memory_size) {
308             return mem;
309         }
310     }
311 
312     return NULL;
313 }
314 
315 /*
316  * Calculate and align the start address and the size of the section.
317  * Return the size. If the size is 0, the aligned section is empty.
318  */
319 static hwaddr kvm_align_section(MemoryRegionSection *section,
320                                 hwaddr *start)
321 {
322     hwaddr size = int128_get64(section->size);
323     hwaddr delta, aligned;
324 
325     /* KVM works in page-size chunks, but the function may be called
326        with a sub-page size and an unaligned start address. Round the
327        start address up and truncate the size down to page boundaries. */
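    /*
     * Worked example (assuming a 4 KiB host page size): for a section
     * covering [0x1800, 0x3800), *start becomes 0x2000 and the returned
     * size is 0x1000, i.e. only the fully covered page [0x2000, 0x3000).
     */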
328     aligned = ROUND_UP(section->offset_within_address_space,
329                        qemu_real_host_page_size());
330     delta = aligned - section->offset_within_address_space;
331     *start = aligned;
332     if (delta > size) {
333         return 0;
334     }
335 
336     return (size - delta) & qemu_real_host_page_mask();
337 }
338 
339 int kvm_physical_memory_addr_from_host(KVMState *s, void *ram,
340                                        hwaddr *phys_addr)
341 {
342     KVMMemoryListener *kml = &s->memory_listener;
343     int i, ret = 0;
344 
345     kvm_slots_lock();
346     for (i = 0; i < s->nr_slots; i++) {
347         KVMSlot *mem = &kml->slots[i];
348 
349         if (ram >= mem->ram && ram < mem->ram + mem->memory_size) {
350             *phys_addr = mem->start_addr + (ram - mem->ram);
351             ret = 1;
352             break;
353         }
354     }
355     kvm_slots_unlock();
356 
357     return ret;
358 }
359 
360 static int kvm_set_user_memory_region(KVMMemoryListener *kml, KVMSlot *slot, bool new)
361 {
362     KVMState *s = kvm_state;
363     struct kvm_userspace_memory_region mem;
364     int ret;
365 
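    /* KVM slot ids encode the address-space id in the upper 16 bits */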
366     mem.slot = slot->slot | (kml->as_id << 16);
367     mem.guest_phys_addr = slot->start_addr;
368     mem.userspace_addr = (unsigned long)slot->ram;
369     mem.flags = slot->flags;
370 
371     if (slot->memory_size && !new && (mem.flags ^ slot->old_flags) & KVM_MEM_READONLY) {
372         /* Set the slot size to 0 before setting the slot to the desired
373          * value. This is needed based on KVM commit 75d61fbc. */
374         mem.memory_size = 0;
375         ret = kvm_vm_ioctl(s, KVM_SET_USER_MEMORY_REGION, &mem);
376         if (ret < 0) {
377             goto err;
378         }
379     }
380     mem.memory_size = slot->memory_size;
381     ret = kvm_vm_ioctl(s, KVM_SET_USER_MEMORY_REGION, &mem);
382     slot->old_flags = mem.flags;
383 err:
384     trace_kvm_set_user_memory(mem.slot, mem.flags, mem.guest_phys_addr,
385                               mem.memory_size, mem.userspace_addr, ret);
386     if (ret < 0) {
387         error_report("%s: KVM_SET_USER_MEMORY_REGION failed, slot=%d,"
388                      " start=0x%" PRIx64 ", size=0x%" PRIx64 ": %s",
389                      __func__, mem.slot, slot->start_addr,
390                      (uint64_t)mem.memory_size, strerror(errno));
391     }
392     return ret;
393 }
394 
395 static int do_kvm_destroy_vcpu(CPUState *cpu)
396 {
397     KVMState *s = kvm_state;
398     long mmap_size;
399     struct KVMParkedVcpu *vcpu = NULL;
400     int ret = 0;
401 
402     DPRINTF("kvm_destroy_vcpu\n");
403 
404     ret = kvm_arch_destroy_vcpu(cpu);
405     if (ret < 0) {
406         goto err;
407     }
408 
409     mmap_size = kvm_ioctl(s, KVM_GET_VCPU_MMAP_SIZE, 0);
410     if (mmap_size < 0) {
411         ret = mmap_size;
412         DPRINTF("KVM_GET_VCPU_MMAP_SIZE failed\n");
413         goto err;
414     }
415 
416     ret = munmap(cpu->kvm_run, mmap_size);
417     if (ret < 0) {
418         goto err;
419     }
420 
421     if (cpu->kvm_dirty_gfns) {
422         ret = munmap(cpu->kvm_dirty_gfns, s->kvm_dirty_ring_bytes);
423         if (ret < 0) {
424             goto err;
425         }
426     }
427 
428     vcpu = g_malloc0(sizeof(*vcpu));
429     vcpu->vcpu_id = kvm_arch_vcpu_id(cpu);
430     vcpu->kvm_fd = cpu->kvm_fd;
431     QLIST_INSERT_HEAD(&kvm_state->kvm_parked_vcpus, vcpu, node);
432 err:
433     return ret;
434 }
435 
436 void kvm_destroy_vcpu(CPUState *cpu)
437 {
438     if (do_kvm_destroy_vcpu(cpu) < 0) {
439         error_report("kvm_destroy_vcpu failed");
440         exit(EXIT_FAILURE);
441     }
442 }
443 
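/*
 * Reuse the file descriptor of a previously parked vcpu with the same id
 * if one exists; otherwise ask KVM to create a new vcpu.
 */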
444 static int kvm_get_vcpu(KVMState *s, unsigned long vcpu_id)
445 {
446     struct KVMParkedVcpu *cpu;
447 
448     QLIST_FOREACH(cpu, &s->kvm_parked_vcpus, node) {
449         if (cpu->vcpu_id == vcpu_id) {
450             int kvm_fd;
451 
452             QLIST_REMOVE(cpu, node);
453             kvm_fd = cpu->kvm_fd;
454             g_free(cpu);
455             return kvm_fd;
456         }
457     }
458 
459     return kvm_vm_ioctl(s, KVM_CREATE_VCPU, (void *)vcpu_id);
460 }
461 
462 int kvm_init_vcpu(CPUState *cpu, Error **errp)
463 {
464     KVMState *s = kvm_state;
465     long mmap_size;
466     int ret;
467 
468     trace_kvm_init_vcpu(cpu->cpu_index, kvm_arch_vcpu_id(cpu));
469 
470     ret = kvm_get_vcpu(s, kvm_arch_vcpu_id(cpu));
471     if (ret < 0) {
472         error_setg_errno(errp, -ret, "kvm_init_vcpu: kvm_get_vcpu failed (%lu)",
473                          kvm_arch_vcpu_id(cpu));
474         goto err;
475     }
476 
477     cpu->kvm_fd = ret;
478     cpu->kvm_state = s;
479     cpu->vcpu_dirty = true;
480     cpu->dirty_pages = 0;
481     cpu->throttle_us_per_full = 0;
482 
483     mmap_size = kvm_ioctl(s, KVM_GET_VCPU_MMAP_SIZE, 0);
484     if (mmap_size < 0) {
485         ret = mmap_size;
486         error_setg_errno(errp, -mmap_size,
487                          "kvm_init_vcpu: KVM_GET_VCPU_MMAP_SIZE failed");
488         goto err;
489     }
490 
491     cpu->kvm_run = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE, MAP_SHARED,
492                         cpu->kvm_fd, 0);
493     if (cpu->kvm_run == MAP_FAILED) {
494         ret = -errno;
495         error_setg_errno(errp, ret,
496                          "kvm_init_vcpu: mmap'ing vcpu state failed (%lu)",
497                          kvm_arch_vcpu_id(cpu));
498         goto err;
499     }
500 
501     if (s->coalesced_mmio && !s->coalesced_mmio_ring) {
502         s->coalesced_mmio_ring =
503             (void *)cpu->kvm_run + s->coalesced_mmio * PAGE_SIZE;
504     }
505 
506     if (s->kvm_dirty_ring_size) {
507         /* Use MAP_SHARED to share pages with the kernel */
508         cpu->kvm_dirty_gfns = mmap(NULL, s->kvm_dirty_ring_bytes,
509                                    PROT_READ | PROT_WRITE, MAP_SHARED,
510                                    cpu->kvm_fd,
511                                    PAGE_SIZE * KVM_DIRTY_LOG_PAGE_OFFSET);
512         if (cpu->kvm_dirty_gfns == MAP_FAILED) {
513             ret = -errno;
514             DPRINTF("mmap'ing vcpu dirty gfns failed: %d\n", ret);
515             goto err;
516         }
517     }
518 
519     ret = kvm_arch_init_vcpu(cpu);
520     if (ret < 0) {
521         error_setg_errno(errp, -ret,
522                          "kvm_init_vcpu: kvm_arch_init_vcpu failed (%lu)",
523                          kvm_arch_vcpu_id(cpu));
524     }
525 err:
526     return ret;
527 }
528 
529 /*
530  * dirty pages logging control
531  */
532 
533 static int kvm_mem_flags(MemoryRegion *mr)
534 {
535     bool readonly = mr->readonly || memory_region_is_romd(mr);
536     int flags = 0;
537 
538     if (memory_region_get_dirty_log_mask(mr) != 0) {
539         flags |= KVM_MEM_LOG_DIRTY_PAGES;
540     }
541     if (readonly && kvm_readonly_mem_allowed) {
542         flags |= KVM_MEM_READONLY;
543     }
544     return flags;
545 }
546 
547 /* Called with KVMMemoryListener.slots_lock held */
548 static int kvm_slot_update_flags(KVMMemoryListener *kml, KVMSlot *mem,
549                                  MemoryRegion *mr)
550 {
551     mem->flags = kvm_mem_flags(mr);
552 
553     /* If nothing changed effectively, no need to issue ioctl */
554     if (mem->flags == mem->old_flags) {
555         return 0;
556     }
557 
558     kvm_slot_init_dirty_bitmap(mem);
559     return kvm_set_user_memory_region(kml, mem, false);
560 }
561 
562 static int kvm_section_update_flags(KVMMemoryListener *kml,
563                                     MemoryRegionSection *section)
564 {
565     hwaddr start_addr, size, slot_size;
566     KVMSlot *mem;
567     int ret = 0;
568 
569     size = kvm_align_section(section, &start_addr);
570     if (!size) {
571         return 0;
572     }
573 
574     kvm_slots_lock();
575 
576     while (size && !ret) {
577         slot_size = MIN(kvm_max_slot_size, size);
578         mem = kvm_lookup_matching_slot(kml, start_addr, slot_size);
579         if (!mem) {
580             /* We don't have a slot if we want to trap every access. */
581             goto out;
582         }
583 
584         ret = kvm_slot_update_flags(kml, mem, section->mr);
585         start_addr += slot_size;
586         size -= slot_size;
587     }
588 
589 out:
590     kvm_slots_unlock();
591     return ret;
592 }
593 
594 static void kvm_log_start(MemoryListener *listener,
595                           MemoryRegionSection *section,
596                           int old, int new)
597 {
598     KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);
599     int r;
600 
601     if (old != 0) {
602         return;
603     }
604 
605     r = kvm_section_update_flags(kml, section);
606     if (r < 0) {
607         abort();
608     }
609 }
610 
611 static void kvm_log_stop(MemoryListener *listener,
612                           MemoryRegionSection *section,
613                           int old, int new)
614 {
615     KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);
616     int r;
617 
618     if (new != 0) {
619         return;
620     }
621 
622     r = kvm_section_update_flags(kml, section);
623     if (r < 0) {
624         abort();
625     }
626 }
627 
628 /* get kvm's dirty pages bitmap and update qemu's */
629 static void kvm_slot_sync_dirty_pages(KVMSlot *slot)
630 {
631     ram_addr_t start = slot->ram_start_offset;
632     ram_addr_t pages = slot->memory_size / qemu_real_host_page_size();
633 
634     cpu_physical_memory_set_dirty_lebitmap(slot->dirty_bmap, start, pages);
635 }
636 
637 static void kvm_slot_reset_dirty_pages(KVMSlot *slot)
638 {
639     memset(slot->dirty_bmap, 0, slot->dirty_bmap_size);
640 }
641 
642 #define ALIGN(x, y)  (((x)+(y)-1) & ~((y)-1))
643 
644 /* Allocate the dirty bitmap for a slot  */
645 static void kvm_slot_init_dirty_bitmap(KVMSlot *mem)
646 {
647     if (!(mem->flags & KVM_MEM_LOG_DIRTY_PAGES) || mem->dirty_bmap) {
648         return;
649     }
650 
651     /*
652      * XXX bad kernel interface alert
653      * For the dirty bitmap, the kernel allocates an array whose size is
654      * aligned to bits-per-long.  But when the kernel is 64-bit and
655      * userspace is 32-bit, userspace cannot align to the same
656      * bits-per-long, since sizeof(long) differs between kernel and
657      * user space.  Userspace would then provide a buffer that may be
658      * 4 bytes smaller than what the kernel uses, resulting in
659      * userspace memory corruption (which valgrind cannot detect in
660      * most cases).
661      * So for now, align to 64 instead of HOST_LONG_BITS here, in the
662      * hope that sizeof(long) won't grow beyond 8 any time soon.
663      *
664      * Note: the granule of kvm dirty log is qemu_real_host_page_size.
665      * And mem->memory_size is aligned to it (otherwise this mem can't
666      * be registered to KVM).
667      */
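    /*
     * Example with 4 KiB host pages: a 1 GiB slot covers 262144 pages,
     * already a multiple of 64, so the bitmap allocated below is 32 KiB.
     */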
668     hwaddr bitmap_size = ALIGN(mem->memory_size / qemu_real_host_page_size(),
669                                         /*HOST_LONG_BITS*/ 64) / 8;
670     mem->dirty_bmap = g_malloc0(bitmap_size);
671     mem->dirty_bmap_size = bitmap_size;
672 }
673 
674 /*
675  * Sync the dirty bitmap from the kernel into KVMSlot.dirty_bmap; return
676  * true on success, false otherwise.
677  */
678 static bool kvm_slot_get_dirty_log(KVMState *s, KVMSlot *slot)
679 {
680     struct kvm_dirty_log d = {};
681     int ret;
682 
683     d.dirty_bitmap = slot->dirty_bmap;
684     d.slot = slot->slot | (slot->as_id << 16);
685     ret = kvm_vm_ioctl(s, KVM_GET_DIRTY_LOG, &d);
686 
687     if (ret == -ENOENT) {
688         /* kernel does not have dirty bitmap in this slot */
689         ret = 0;
690     }
691     if (ret) {
692         error_report_once("%s: KVM_GET_DIRTY_LOG failed with %d",
693                           __func__, ret);
694     }
695     return ret == 0;
696 }
697 
698 /* Must be called with the slots_lock held for all address spaces. */
699 static void kvm_dirty_ring_mark_page(KVMState *s, uint32_t as_id,
700                                      uint32_t slot_id, uint64_t offset)
701 {
702     KVMMemoryListener *kml;
703     KVMSlot *mem;
704 
705     if (as_id >= s->nr_as) {
706         return;
707     }
708 
709     kml = s->as[as_id].ml;
710     mem = &kml->slots[slot_id];
711 
712     if (!mem->memory_size || offset >=
713         (mem->memory_size / qemu_real_host_page_size())) {
714         return;
715     }
716 
717     set_bit(offset, mem->dirty_bmap);
718 }
719 
720 static bool dirty_gfn_is_dirtied(struct kvm_dirty_gfn *gfn)
721 {
722     /*
723      * Read the flags before the value.  Pairs with barrier in
724      * KVM's kvm_dirty_ring_push() function.
725      */
726     return qatomic_load_acquire(&gfn->flags) == KVM_DIRTY_GFN_F_DIRTY;
727 }
728 
729 static void dirty_gfn_set_collected(struct kvm_dirty_gfn *gfn)
730 {
731     gfn->flags = KVM_DIRTY_GFN_F_RESET;
732 }
733 
734 /*
735  * Must be called with the slots_lock held for all address spaces.  It returns
736  * the number of dirty pages collected from this vcpu's dirty ring.
737  */
738 static uint32_t kvm_dirty_ring_reap_one(KVMState *s, CPUState *cpu)
739 {
740     struct kvm_dirty_gfn *dirty_gfns = cpu->kvm_dirty_gfns, *cur;
741     uint32_t ring_size = s->kvm_dirty_ring_size;
742     uint32_t count = 0, fetch = cpu->kvm_fetch_index;
743 
744     assert(dirty_gfns && ring_size);
745     trace_kvm_dirty_ring_reap_vcpu(cpu->cpu_index);
746 
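    /*
     * Walk the ring from our private fetch index, collecting entries until
     * we reach the first one the kernel has not marked dirty yet; the
     * index wraps around via the "% ring_size" below.
     */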
747     while (true) {
748         cur = &dirty_gfns[fetch % ring_size];
749         if (!dirty_gfn_is_dirtied(cur)) {
750             break;
751         }
752         kvm_dirty_ring_mark_page(s, cur->slot >> 16, cur->slot & 0xffff,
753                                  cur->offset);
754         dirty_gfn_set_collected(cur);
755         trace_kvm_dirty_ring_page(cpu->cpu_index, fetch, cur->offset);
756         fetch++;
757         count++;
758     }
759     cpu->kvm_fetch_index = fetch;
760     cpu->dirty_pages += count;
761 
762     return count;
763 }
764 
765 /* Must be called with slots_lock held */
766 static uint64_t kvm_dirty_ring_reap_locked(KVMState *s, CPUState* cpu)
767 {
768     int ret;
769     uint64_t total = 0;
770     int64_t stamp;
771 
772     stamp = get_clock();
773 
774     if (cpu) {
775         total = kvm_dirty_ring_reap_one(s, cpu);
776     } else {
777         CPU_FOREACH(cpu) {
778             total += kvm_dirty_ring_reap_one(s, cpu);
779         }
780     }
781 
782     if (total) {
783         ret = kvm_vm_ioctl(s, KVM_RESET_DIRTY_RINGS);
784         assert(ret == total);
785     }
786 
787     stamp = get_clock() - stamp;
788 
789     if (total) {
790         trace_kvm_dirty_ring_reap(total, stamp / 1000);
791     }
792 
793     return total;
794 }
795 
796 /*
797  * Currently, for simplicity, the BQL must be held when calling this.  We can
798  * consider dropping the BQL once we are clear about all the race conditions.
799  */
800 static uint64_t kvm_dirty_ring_reap(KVMState *s, CPUState *cpu)
801 {
802     uint64_t total;
803 
804     /*
805      * We need to lock all kvm slots for all address spaces here,
806      * because:
807      *
808      * (1) We need to mark dirty for dirty bitmaps in multiple slots
809      *     and for tons of pages, so it's better to take the lock here
810      *     once rather than once per page.  And more importantly,
811      *
812      * (2) We must _NOT_ publish dirty bits to the other threads
813      *     (e.g., the migration thread) via the kvm memory slot dirty
814      *     bitmaps before correctly re-protecting those dirtied pages.
815      *     Otherwise we run the risk of data corruption if the page
816      *     data is read in another thread before we perform the
817      *     KVM_RESET_DIRTY_RINGS reset below.
818      */
819     kvm_slots_lock();
820     total = kvm_dirty_ring_reap_locked(s, cpu);
821     kvm_slots_unlock();
822 
823     return total;
824 }
825 
826 static void do_kvm_cpu_synchronize_kick(CPUState *cpu, run_on_cpu_data arg)
827 {
828     /* No need to do anything */
829 }
830 
831 /*
832  * Kick all vcpus out in a synchronized way.  When this returns, we
833  * guarantee that every vcpu has been kicked and has returned to
834  * userspace at least once.
835  */
836 static void kvm_cpu_synchronize_kick_all(void)
837 {
838     CPUState *cpu;
839 
840     CPU_FOREACH(cpu) {
841         run_on_cpu(cpu, do_kvm_cpu_synchronize_kick, RUN_ON_CPU_NULL);
842     }
843 }
844 
845 /*
846  * Flush all the existing dirty pages to the KVM slot buffers.  When
847  * this call returns, we guarantee that all pages dirtied before this
848  * function was called have been recorded in the per-kvmslot dirty
849  * bitmap.
850  *
851  * This function must be called with BQL held.
852  */
853 static void kvm_dirty_ring_flush(void)
854 {
855     trace_kvm_dirty_ring_flush(0);
856     /*
857      * The function needs to be serialized.  Since this function
858      * should always be with BQL held, serialization is guaranteed.
859      * However, let's be sure of it.
860      */
861     assert(qemu_mutex_iothread_locked());
862     /*
863      * First make sure to flush the hardware buffers by kicking all
864      * vcpus out in a synchronous way.
865      */
866     kvm_cpu_synchronize_kick_all();
867     kvm_dirty_ring_reap(kvm_state, NULL);
868     trace_kvm_dirty_ring_flush(1);
869 }
870 
871 /**
872  * kvm_physical_sync_dirty_bitmap - Sync dirty bitmap from kernel space
873  *
874  * This function will first try to fetch dirty bitmap from the kernel,
875  * This function first tries to fetch the dirty bitmap from the kernel,
876  * and then updates QEMU's dirty bitmap.
877  *
878  * NOTE: the caller must hold kml->slots_lock.
879  * @kml: the KVM memory listener object
880  * @section: the memory section to sync the dirty bitmap with
881  */
882 static void kvm_physical_sync_dirty_bitmap(KVMMemoryListener *kml,
883                                            MemoryRegionSection *section)
884 {
885     KVMState *s = kvm_state;
886     KVMSlot *mem;
887     hwaddr start_addr, size;
888     hwaddr slot_size;
889 
890     size = kvm_align_section(section, &start_addr);
891     while (size) {
892         slot_size = MIN(kvm_max_slot_size, size);
893         mem = kvm_lookup_matching_slot(kml, start_addr, slot_size);
894         if (!mem) {
895             /* We don't have a slot if we want to trap every access. */
896             return;
897         }
898         if (kvm_slot_get_dirty_log(s, mem)) {
899             kvm_slot_sync_dirty_pages(mem);
900         }
901         start_addr += slot_size;
902         size -= slot_size;
903     }
904 }
905 
906 /* Alignment requirement for KVM_CLEAR_DIRTY_LOG - 64 pages */
907 #define KVM_CLEAR_LOG_SHIFT  6
908 #define KVM_CLEAR_LOG_ALIGN  (qemu_real_host_page_size() << KVM_CLEAR_LOG_SHIFT)
909 #define KVM_CLEAR_LOG_MASK   (-KVM_CLEAR_LOG_ALIGN)
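/* With a 4 KiB host page size, KVM_CLEAR_LOG_ALIGN works out to 256 KiB */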
910 
911 static int kvm_log_clear_one_slot(KVMSlot *mem, int as_id, uint64_t start,
912                                   uint64_t size)
913 {
914     KVMState *s = kvm_state;
915     uint64_t end, bmap_start, start_delta, bmap_npages;
916     struct kvm_clear_dirty_log d;
917     unsigned long *bmap_clear = NULL, psize = qemu_real_host_page_size();
918     int ret;
919 
920     /*
921      * We need to extend either the start or the size or both to
922      * satisfy the KVM interface requirement.  First, align the start
923      * down to a 64-host-page boundary.
924      */
925     bmap_start = start & KVM_CLEAR_LOG_MASK;
926     start_delta = start - bmap_start;
927     bmap_start /= psize;
928 
929     /*
930      * The kernel interface also restricts the size: either
931      *
932      * (1) the size is aligned to 64 host pages (just like the start), or
933      * (2) the size extends to the end of the KVM memslot.
934      */
935     bmap_npages = DIV_ROUND_UP(size + start_delta, KVM_CLEAR_LOG_ALIGN)
936         << KVM_CLEAR_LOG_SHIFT;
937     end = mem->memory_size / psize;
938     if (bmap_npages > end - bmap_start) {
939         bmap_npages = end - bmap_start;
940     }
941     start_delta /= psize;
942 
943     /*
944      * Prepare the bitmap to clear dirty bits.  Here we must guarantee
945      * that we won't clear any unknown dirty bits, otherwise we might
946      * accidentally clear bits that have not yet been synced from the
947      * kernel into QEMU's bitmap and so lose track of the guest
948      * modifications to those pages (which can directly lead to guest
949      * data loss or a panic after migration).
950      *
951      * Layout of the KVMSlot.dirty_bmap:
952      *
953      *                   |<-------- bmap_npages -----------..>|
954      *                                                     [1]
955      *                     start_delta         size
956      *  |----------------|-------------|------------------|------------|
957      *  ^                ^             ^                               ^
958      *  |                |             |                               |
959      * start          bmap_start     (start)                         end
960      * of memslot                                             of memslot
961      *
962      * [1] bmap_npages can be aligned to either 64 pages or the end of slot
963      */
964 
965     assert(bmap_start % BITS_PER_LONG == 0);
966     /* We should never do log_clear before log_sync */
967     assert(mem->dirty_bmap);
968     if (start_delta || bmap_npages - size / psize) {
969         /* Slow path - we need to manipulate a temp bitmap */
970         bmap_clear = bitmap_new(bmap_npages);
971         bitmap_copy_with_src_offset(bmap_clear, mem->dirty_bmap,
972                                     bmap_start, start_delta + size / psize);
973         /*
974          * Clear the bits at the start that the caller did not ask for;
975          * we extended the range only to satisfy the 64-page alignment
976          * requirement.
977          */
978         bitmap_clear(bmap_clear, 0, start_delta);
979         d.dirty_bitmap = bmap_clear;
980     } else {
981         /*
982          * Fast path - both start and size align well with BITS_PER_LONG
983          * (or the end of memory slot)
984          */
985         d.dirty_bitmap = mem->dirty_bmap + BIT_WORD(bmap_start);
986     }
987 
988     d.first_page = bmap_start;
989     /* It should never overflow.  If it happens, say something */
990     assert(bmap_npages <= UINT32_MAX);
991     d.num_pages = bmap_npages;
992     d.slot = mem->slot | (as_id << 16);
993 
994     ret = kvm_vm_ioctl(s, KVM_CLEAR_DIRTY_LOG, &d);
995     if (ret < 0 && ret != -ENOENT) {
996         error_report("%s: KVM_CLEAR_DIRTY_LOG failed, slot=%d, "
997                      "start=0x%"PRIx64", size=0x%"PRIx32", errno=%d",
998                      __func__, d.slot, (uint64_t)d.first_page,
999                      (uint32_t)d.num_pages, ret);
1000     } else {
1001         ret = 0;
1002         trace_kvm_clear_dirty_log(d.slot, d.first_page, d.num_pages);
1003     }
1004 
1005     /*
1006      * After updating the remote dirty bitmap, also update the cached
1007      * bitmap for the memslot, so that if another user clears the same
1008      * region we know not to clear it again on the remote side, which
1009      * would otherwise also cause data loss.
1010      */
1011     bitmap_clear(mem->dirty_bmap, bmap_start + start_delta,
1012                  size / psize);
1013     /* This handles the NULL case well */
1014     g_free(bmap_clear);
1015     return ret;
1016 }
1017 
1018 
1019 /**
1020  * kvm_physical_log_clear - Clear the kernel's dirty bitmap for range
1021  *
1022  * NOTE: this will be a no-op if we haven't enabled manual dirty log
1023  * protection in the host kernel because in that case this operation
1024  * will be done within log_sync().
1025  *
1026  * @kml:     the kvm memory listener
1027  * @section: the memory range to clear dirty bitmap
1028  */
1029 static int kvm_physical_log_clear(KVMMemoryListener *kml,
1030                                   MemoryRegionSection *section)
1031 {
1032     KVMState *s = kvm_state;
1033     uint64_t start, size, offset, count;
1034     KVMSlot *mem;
1035     int ret = 0, i;
1036 
1037     if (!s->manual_dirty_log_protect) {
1038         /* No need to do explicit clear */
1039         return ret;
1040     }
1041 
1042     start = section->offset_within_address_space;
1043     size = int128_get64(section->size);
1044 
1045     if (!size) {
1046         /* Nothing more we can do... */
1047         return ret;
1048     }
1049 
1050     kvm_slots_lock();
1051 
1052     for (i = 0; i < s->nr_slots; i++) {
1053         mem = &kml->slots[i];
1054         /* Discard slots that are empty or do not overlap the section */
1055         if (!mem->memory_size ||
1056             mem->start_addr > start + size - 1 ||
1057             start > mem->start_addr + mem->memory_size - 1) {
1058             continue;
1059         }
1060 
1061         if (start >= mem->start_addr) {
1062             /* The slot starts before section or is aligned to it.  */
1063             offset = start - mem->start_addr;
1064             count = MIN(mem->memory_size - offset, size);
1065         } else {
1066             /* The slot starts after section.  */
1067             offset = 0;
1068             count = MIN(mem->memory_size, size - (mem->start_addr - start));
1069         }
1070         ret = kvm_log_clear_one_slot(mem, kml->as_id, offset, count);
1071         if (ret < 0) {
1072             break;
1073         }
1074     }
1075 
1076     kvm_slots_unlock();
1077 
1078     return ret;
1079 }
1080 
1081 static void kvm_coalesce_mmio_region(MemoryListener *listener,
1082                                      MemoryRegionSection *section,
1083                                      hwaddr start, hwaddr size)
1084 {
1085     KVMState *s = kvm_state;
1086 
1087     if (s->coalesced_mmio) {
1088         struct kvm_coalesced_mmio_zone zone;
1089 
1090         zone.addr = start;
1091         zone.size = size;
1092         zone.pad = 0;
1093 
1094         (void)kvm_vm_ioctl(s, KVM_REGISTER_COALESCED_MMIO, &zone);
1095     }
1096 }
1097 
1098 static void kvm_uncoalesce_mmio_region(MemoryListener *listener,
1099                                        MemoryRegionSection *section,
1100                                        hwaddr start, hwaddr size)
1101 {
1102     KVMState *s = kvm_state;
1103 
1104     if (s->coalesced_mmio) {
1105         struct kvm_coalesced_mmio_zone zone;
1106 
1107         zone.addr = start;
1108         zone.size = size;
1109         zone.pad = 0;
1110 
1111         (void)kvm_vm_ioctl(s, KVM_UNREGISTER_COALESCED_MMIO, &zone);
1112     }
1113 }
1114 
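/* Coalesced PIO reuses the coalesced-MMIO zone ioctls, with zone.pio set */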
1115 static void kvm_coalesce_pio_add(MemoryListener *listener,
1116                                 MemoryRegionSection *section,
1117                                 hwaddr start, hwaddr size)
1118 {
1119     KVMState *s = kvm_state;
1120 
1121     if (s->coalesced_pio) {
1122         struct kvm_coalesced_mmio_zone zone;
1123 
1124         zone.addr = start;
1125         zone.size = size;
1126         zone.pio = 1;
1127 
1128         (void)kvm_vm_ioctl(s, KVM_REGISTER_COALESCED_MMIO, &zone);
1129     }
1130 }
1131 
1132 static void kvm_coalesce_pio_del(MemoryListener *listener,
1133                                 MemoryRegionSection *section,
1134                                 hwaddr start, hwaddr size)
1135 {
1136     KVMState *s = kvm_state;
1137 
1138     if (s->coalesced_pio) {
1139         struct kvm_coalesced_mmio_zone zone;
1140 
1141         zone.addr = start;
1142         zone.size = size;
1143         zone.pio = 1;
1144 
1145         (void)kvm_vm_ioctl(s, KVM_UNREGISTER_COALESCED_MMIO, &zone);
1146      }
1147 }
1148 
1149 static MemoryListener kvm_coalesced_pio_listener = {
1150     .name = "kvm-coalesced-pio",
1151     .coalesced_io_add = kvm_coalesce_pio_add,
1152     .coalesced_io_del = kvm_coalesce_pio_del,
1153 };
1154 
1155 int kvm_check_extension(KVMState *s, unsigned int extension)
1156 {
1157     int ret;
1158 
1159     ret = kvm_ioctl(s, KVM_CHECK_EXTENSION, extension);
1160     if (ret < 0) {
1161         ret = 0;
1162     }
1163 
1164     return ret;
1165 }
1166 
1167 int kvm_vm_check_extension(KVMState *s, unsigned int extension)
1168 {
1169     int ret;
1170 
1171     ret = kvm_vm_ioctl(s, KVM_CHECK_EXTENSION, extension);
1172     if (ret < 0) {
1173         /* VM wide version not implemented, use global one instead */
1174         ret = kvm_check_extension(s, extension);
1175     }
1176 
1177     return ret;
1178 }
1179 
1180 typedef struct HWPoisonPage {
1181     ram_addr_t ram_addr;
1182     QLIST_ENTRY(HWPoisonPage) list;
1183 } HWPoisonPage;
1184 
1185 static QLIST_HEAD(, HWPoisonPage) hwpoison_page_list =
1186     QLIST_HEAD_INITIALIZER(hwpoison_page_list);
1187 
1188 static void kvm_unpoison_all(void *param)
1189 {
1190     HWPoisonPage *page, *next_page;
1191 
1192     QLIST_FOREACH_SAFE(page, &hwpoison_page_list, list, next_page) {
1193         QLIST_REMOVE(page, list);
1194         qemu_ram_remap(page->ram_addr, TARGET_PAGE_SIZE);
1195         g_free(page);
1196     }
1197 }
1198 
1199 void kvm_hwpoison_page_add(ram_addr_t ram_addr)
1200 {
1201     HWPoisonPage *page;
1202 
1203     QLIST_FOREACH(page, &hwpoison_page_list, list) {
1204         if (page->ram_addr == ram_addr) {
1205             return;
1206         }
1207     }
1208     page = g_new(HWPoisonPage, 1);
1209     page->ram_addr = ram_addr;
1210     QLIST_INSERT_HEAD(&hwpoison_page_list, page, list);
1211 }
1212 
1213 static uint32_t adjust_ioeventfd_endianness(uint32_t val, uint32_t size)
1214 {
1215 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
1216     /* The kernel expects ioeventfd values in host endianness,
1217      * but the memory core hands them to us in target endianness.
1218      * For example, PPC is always treated as big-endian even if running
1219      * on KVM and on PPC64LE.  Correct here.
1220      */
1221     switch (size) {
1222     case 2:
1223         val = bswap16(val);
1224         break;
1225     case 4:
1226         val = bswap32(val);
1227         break;
1228     }
1229 #endif
1230     return val;
1231 }
1232 
1233 static int kvm_set_ioeventfd_mmio(int fd, hwaddr addr, uint32_t val,
1234                                   bool assign, uint32_t size, bool datamatch)
1235 {
1236     int ret;
1237     struct kvm_ioeventfd iofd = {
1238         .datamatch = datamatch ? adjust_ioeventfd_endianness(val, size) : 0,
1239         .addr = addr,
1240         .len = size,
1241         .flags = 0,
1242         .fd = fd,
1243     };
1244 
1245     trace_kvm_set_ioeventfd_mmio(fd, (uint64_t)addr, val, assign, size,
1246                                  datamatch);
1247     if (!kvm_enabled()) {
1248         return -ENOSYS;
1249     }
1250 
1251     if (datamatch) {
1252         iofd.flags |= KVM_IOEVENTFD_FLAG_DATAMATCH;
1253     }
1254     if (!assign) {
1255         iofd.flags |= KVM_IOEVENTFD_FLAG_DEASSIGN;
1256     }
1257 
1258     ret = kvm_vm_ioctl(kvm_state, KVM_IOEVENTFD, &iofd);
1259 
1260     if (ret < 0) {
1261         return -errno;
1262     }
1263 
1264     return 0;
1265 }
1266 
1267 static int kvm_set_ioeventfd_pio(int fd, uint16_t addr, uint16_t val,
1268                                  bool assign, uint32_t size, bool datamatch)
1269 {
1270     struct kvm_ioeventfd kick = {
1271         .datamatch = datamatch ? adjust_ioeventfd_endianness(val, size) : 0,
1272         .addr = addr,
1273         .flags = KVM_IOEVENTFD_FLAG_PIO,
1274         .len = size,
1275         .fd = fd,
1276     };
1277     int r;
1278     trace_kvm_set_ioeventfd_pio(fd, addr, val, assign, size, datamatch);
1279     if (!kvm_enabled()) {
1280         return -ENOSYS;
1281     }
1282     if (datamatch) {
1283         kick.flags |= KVM_IOEVENTFD_FLAG_DATAMATCH;
1284     }
1285     if (!assign) {
1286         kick.flags |= KVM_IOEVENTFD_FLAG_DEASSIGN;
1287     }
1288     r = kvm_vm_ioctl(kvm_state, KVM_IOEVENTFD, &kick);
1289     if (r < 0) {
1290         return r;
1291     }
1292     return 0;
1293 }
1294 
1295 
1296 static int kvm_check_many_ioeventfds(void)
1297 {
1298     /* Userspace can use ioeventfd for io notification.  This requires a host
1299      * that supports eventfd(2) and an I/O thread; since eventfd does not
1300      * support SIGIO it cannot interrupt the vcpu.
1301      *
1302      * Older kernels have a 6 device limit on the KVM io bus.  Find out so we
1303      * can avoid creating too many ioeventfds.
1304      */
1305 #if defined(CONFIG_EVENTFD)
1306     int ioeventfds[7];
1307     int i, ret = 0;
1308     for (i = 0; i < ARRAY_SIZE(ioeventfds); i++) {
1309         ioeventfds[i] = eventfd(0, EFD_CLOEXEC);
1310         if (ioeventfds[i] < 0) {
1311             break;
1312         }
1313         ret = kvm_set_ioeventfd_pio(ioeventfds[i], 0, i, true, 2, true);
1314         if (ret < 0) {
1315             close(ioeventfds[i]);
1316             break;
1317         }
1318     }
1319 
1320     /* Decide whether many devices are supported or not */
1321     ret = i == ARRAY_SIZE(ioeventfds);
1322 
1323     while (i-- > 0) {
1324         kvm_set_ioeventfd_pio(ioeventfds[i], 0, i, false, 2, true);
1325         close(ioeventfds[i]);
1326     }
1327     return ret;
1328 #else
1329     return 0;
1330 #endif
1331 }
1332 
1333 static const KVMCapabilityInfo *
1334 kvm_check_extension_list(KVMState *s, const KVMCapabilityInfo *list)
1335 {
1336     while (list->name) {
1337         if (!kvm_check_extension(s, list->value)) {
1338             return list;
1339         }
1340         list++;
1341     }
1342     return NULL;
1343 }
1344 
1345 void kvm_set_max_memslot_size(hwaddr max_slot_size)
1346 {
1347     g_assert(
1348         ROUND_UP(max_slot_size, qemu_real_host_page_size()) == max_slot_size
1349     );
1350     kvm_max_slot_size = max_slot_size;
1351 }
1352 
1353 static void kvm_set_phys_mem(KVMMemoryListener *kml,
1354                              MemoryRegionSection *section, bool add)
1355 {
1356     KVMSlot *mem;
1357     int err;
1358     MemoryRegion *mr = section->mr;
1359     bool writable = !mr->readonly && !mr->rom_device;
1360     hwaddr start_addr, size, slot_size, mr_offset;
1361     ram_addr_t ram_start_offset;
1362     void *ram;
1363 
1364     if (!memory_region_is_ram(mr)) {
1365         if (writable || !kvm_readonly_mem_allowed) {
1366             return;
1367         } else if (!mr->romd_mode) {
1368             /* If the memory device is not in romd_mode, then we actually want
1369              * to remove the kvm memory slot so all accesses will trap. */
1370             add = false;
1371         }
1372     }
1373 
1374     size = kvm_align_section(section, &start_addr);
1375     if (!size) {
1376         return;
1377     }
1378 
1379     /* The offset of the kvmslot within the memory region */
1380     mr_offset = section->offset_within_region + start_addr -
1381         section->offset_within_address_space;
1382 
1383     /* use aligned delta to align the ram address and offset */
1384     ram = memory_region_get_ram_ptr(mr) + mr_offset;
1385     ram_start_offset = memory_region_get_ram_addr(mr) + mr_offset;
1386 
1387     kvm_slots_lock();
1388 
1389     if (!add) {
1390         do {
1391             slot_size = MIN(kvm_max_slot_size, size);
1392             mem = kvm_lookup_matching_slot(kml, start_addr, slot_size);
1393             if (!mem) {
1394                 goto out;
1395             }
1396             if (mem->flags & KVM_MEM_LOG_DIRTY_PAGES) {
1397                 /*
1398                  * NOTE: this is only a best-effort attempt to sync the dirty
1399                  * bits.  Whether we're using the dirty log or the dirty ring,
1400                  * we ignore two facts:
1401                  *
1402                  * (1) dirty bits can reside in hardware buffers (PML)
1403                  *
1404                  * (2) after we collected dirty bits here, pages can be dirtied
1405                  * again before we do the final KVM_SET_USER_MEMORY_REGION to
1406                  * remove the slot.
1407                  *
1408                  * This is not easy to fix, so keep our fingers crossed for now.
1409                  */
1410                 if (kvm_state->kvm_dirty_ring_size) {
1411                     kvm_dirty_ring_reap_locked(kvm_state, NULL);
1412                 } else {
1413                     kvm_slot_get_dirty_log(kvm_state, mem);
1414                 }
1415                 kvm_slot_sync_dirty_pages(mem);
1416             }
1417 
1418             /* unregister the slot */
1419             g_free(mem->dirty_bmap);
1420             mem->dirty_bmap = NULL;
1421             mem->memory_size = 0;
1422             mem->flags = 0;
1423             err = kvm_set_user_memory_region(kml, mem, false);
1424             if (err) {
1425                 fprintf(stderr, "%s: error unregistering slot: %s\n",
1426                         __func__, strerror(-err));
1427                 abort();
1428             }
1429             start_addr += slot_size;
1430             size -= slot_size;
1431         } while (size);
1432         goto out;
1433     }
1434 
1435     /* register the new slot */
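    /* Sections larger than kvm_max_slot_size are split across multiple slots */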
1436     do {
1437         slot_size = MIN(kvm_max_slot_size, size);
1438         mem = kvm_alloc_slot(kml);
1439         mem->as_id = kml->as_id;
1440         mem->memory_size = slot_size;
1441         mem->start_addr = start_addr;
1442         mem->ram_start_offset = ram_start_offset;
1443         mem->ram = ram;
1444         mem->flags = kvm_mem_flags(mr);
1445         kvm_slot_init_dirty_bitmap(mem);
1446         err = kvm_set_user_memory_region(kml, mem, true);
1447         if (err) {
1448             fprintf(stderr, "%s: error registering slot: %s\n", __func__,
1449                     strerror(-err));
1450             abort();
1451         }
1452         start_addr += slot_size;
1453         ram_start_offset += slot_size;
1454         ram += slot_size;
1455         size -= slot_size;
1456     } while (size);
1457 
1458 out:
1459     kvm_slots_unlock();
1460 }
1461 
1462 static void *kvm_dirty_ring_reaper_thread(void *data)
1463 {
1464     KVMState *s = data;
1465     struct KVMDirtyRingReaper *r = &s->reaper;
1466 
1467     rcu_register_thread();
1468 
1469     trace_kvm_dirty_ring_reaper("init");
1470 
1471     while (true) {
1472         r->reaper_state = KVM_DIRTY_RING_REAPER_WAIT;
1473         trace_kvm_dirty_ring_reaper("wait");
1474         /*
1475          * TODO: provide a smarter timeout rather than a constant?
1476          */
1477         sleep(1);
1478 
1479         /* keep sleeping so that the dirty limit logic is not disturbed by the reaper */
1480         if (dirtylimit_in_service()) {
1481             continue;
1482         }
1483 
1484         trace_kvm_dirty_ring_reaper("wakeup");
1485         r->reaper_state = KVM_DIRTY_RING_REAPER_REAPING;
1486 
1487         qemu_mutex_lock_iothread();
1488         kvm_dirty_ring_reap(s, NULL);
1489         qemu_mutex_unlock_iothread();
1490 
1491         r->reaper_iteration++;
1492     }
1493 
1494     trace_kvm_dirty_ring_reaper("exit");
1495 
1496     rcu_unregister_thread();
1497 
1498     return NULL;
1499 }
1500 
1501 static int kvm_dirty_ring_reaper_init(KVMState *s)
1502 {
1503     struct KVMDirtyRingReaper *r = &s->reaper;
1504 
1505     qemu_thread_create(&r->reaper_thr, "kvm-reaper",
1506                        kvm_dirty_ring_reaper_thread,
1507                        s, QEMU_THREAD_JOINABLE);
1508 
1509     return 0;
1510 }
1511 
1512 static void kvm_region_add(MemoryListener *listener,
1513                            MemoryRegionSection *section)
1514 {
1515     KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);
1516 
1517     memory_region_ref(section->mr);
1518     kvm_set_phys_mem(kml, section, true);
1519 }
1520 
1521 static void kvm_region_del(MemoryListener *listener,
1522                            MemoryRegionSection *section)
1523 {
1524     KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);
1525 
1526     kvm_set_phys_mem(kml, section, false);
1527     memory_region_unref(section->mr);
1528 }
1529 
1530 static void kvm_log_sync(MemoryListener *listener,
1531                          MemoryRegionSection *section)
1532 {
1533     KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);
1534 
1535     kvm_slots_lock();
1536     kvm_physical_sync_dirty_bitmap(kml, section);
1537     kvm_slots_unlock();
1538 }
1539 
1540 static void kvm_log_sync_global(MemoryListener *l)
1541 {
1542     KVMMemoryListener *kml = container_of(l, KVMMemoryListener, listener);
1543     KVMState *s = kvm_state;
1544     KVMSlot *mem;
1545     int i;
1546 
1547     /* Flush all kernel dirty addresses into KVMSlot dirty bitmap */
1548     kvm_dirty_ring_flush();
1549 
1550     /*
1551      * TODO: make this faster when nr_slots is large but only a few
1552      * slots are in use (small VMs).
1553      */
1554     kvm_slots_lock();
1555     for (i = 0; i < s->nr_slots; i++) {
1556         mem = &kml->slots[i];
1557         if (mem->memory_size && mem->flags & KVM_MEM_LOG_DIRTY_PAGES) {
1558             kvm_slot_sync_dirty_pages(mem);
1559             /*
1560              * This is not needed by KVM_GET_DIRTY_LOG because the
1561              * ioctl will unconditionally overwrite the whole region.
1562              * However kvm dirty ring has no such side effect.
1563              */
1564             kvm_slot_reset_dirty_pages(mem);
1565         }
1566     }
1567     kvm_slots_unlock();
1568 }
1569 
1570 static void kvm_log_clear(MemoryListener *listener,
1571                           MemoryRegionSection *section)
1572 {
1573     KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);
1574     int r;
1575 
1576     r = kvm_physical_log_clear(kml, section);
1577     if (r < 0) {
1578         error_report_once("%s: kvm log clear failed: mr=%s "
1579                           "offset=%"HWADDR_PRIx" size=%"PRIx64, __func__,
1580                           section->mr->name, section->offset_within_region,
1581                           int128_get64(section->size));
1582         abort();
1583     }
1584 }
1585 
1586 static void kvm_mem_ioeventfd_add(MemoryListener *listener,
1587                                   MemoryRegionSection *section,
1588                                   bool match_data, uint64_t data,
1589                                   EventNotifier *e)
1590 {
1591     int fd = event_notifier_get_fd(e);
1592     int r;
1593 
1594     r = kvm_set_ioeventfd_mmio(fd, section->offset_within_address_space,
1595                                data, true, int128_get64(section->size),
1596                                match_data);
1597     if (r < 0) {
1598         fprintf(stderr, "%s: error adding ioeventfd: %s (%d)\n",
1599                 __func__, strerror(-r), -r);
1600         abort();
1601     }
1602 }
1603 
1604 static void kvm_mem_ioeventfd_del(MemoryListener *listener,
1605                                   MemoryRegionSection *section,
1606                                   bool match_data, uint64_t data,
1607                                   EventNotifier *e)
1608 {
1609     int fd = event_notifier_get_fd(e);
1610     int r;
1611 
1612     r = kvm_set_ioeventfd_mmio(fd, section->offset_within_address_space,
1613                                data, false, int128_get64(section->size),
1614                                match_data);
1615     if (r < 0) {
1616         fprintf(stderr, "%s: error deleting ioeventfd: %s (%d)\n",
1617                 __func__, strerror(-r), -r);
1618         abort();
1619     }
1620 }
1621 
1622 static void kvm_io_ioeventfd_add(MemoryListener *listener,
1623                                  MemoryRegionSection *section,
1624                                  bool match_data, uint64_t data,
1625                                  EventNotifier *e)
1626 {
1627     int fd = event_notifier_get_fd(e);
1628     int r;
1629 
1630     r = kvm_set_ioeventfd_pio(fd, section->offset_within_address_space,
1631                               data, true, int128_get64(section->size),
1632                               match_data);
1633     if (r < 0) {
1634         fprintf(stderr, "%s: error adding ioeventfd: %s (%d)\n",
1635                 __func__, strerror(-r), -r);
1636         abort();
1637     }
1638 }
1639 
1640 static void kvm_io_ioeventfd_del(MemoryListener *listener,
1641                                  MemoryRegionSection *section,
1642                                  bool match_data, uint64_t data,
1643                                  EventNotifier *e)
1644 
1645 {
1646     int fd = event_notifier_get_fd(e);
1647     int r;
1648 
1649     r = kvm_set_ioeventfd_pio(fd, section->offset_within_address_space,
1650                               data, false, int128_get64(section->size),
1651                               match_data);
1652     if (r < 0) {
1653         fprintf(stderr, "%s: error deleting ioeventfd: %s (%d)\n",
1654                 __func__, strerror(-r), -r);
1655         abort();
1656     }
1657 }
1658 
1659 void kvm_memory_listener_register(KVMState *s, KVMMemoryListener *kml,
1660                                   AddressSpace *as, int as_id, const char *name)
1661 {
1662     int i;
1663 
1664     kml->slots = g_new0(KVMSlot, s->nr_slots);
1665     kml->as_id = as_id;
1666 
1667     for (i = 0; i < s->nr_slots; i++) {
1668         kml->slots[i].slot = i;
1669     }
1670 
1671     kml->listener.region_add = kvm_region_add;
1672     kml->listener.region_del = kvm_region_del;
1673     kml->listener.log_start = kvm_log_start;
1674     kml->listener.log_stop = kvm_log_stop;
1675     kml->listener.priority = 10;
1676     kml->listener.name = name;
1677 
1678     if (s->kvm_dirty_ring_size) {
1679         kml->listener.log_sync_global = kvm_log_sync_global;
1680     } else {
1681         kml->listener.log_sync = kvm_log_sync;
1682         kml->listener.log_clear = kvm_log_clear;
1683     }
1684 
1685     memory_listener_register(&kml->listener, as);
1686 
1687     for (i = 0; i < s->nr_as; ++i) {
1688         if (!s->as[i].as) {
1689             s->as[i].as = as;
1690             s->as[i].ml = kml;
1691             break;
1692         }
1693     }
1694 }
1695 
1696 static MemoryListener kvm_io_listener = {
1697     .name = "kvm-io",
1698     .eventfd_add = kvm_io_ioeventfd_add,
1699     .eventfd_del = kvm_io_ioeventfd_del,
1700     .priority = 10,
1701 };
1702 
1703 int kvm_set_irq(KVMState *s, int irq, int level)
1704 {
1705     struct kvm_irq_level event;
1706     int ret;
1707 
1708     assert(kvm_async_interrupts_enabled());
1709 
1710     event.level = level;
1711     event.irq = irq;
1712     ret = kvm_vm_ioctl(s, s->irq_set_ioctl, &event);
1713     if (ret < 0) {
1714         perror("kvm_set_irq");
1715         abort();
1716     }
1717 
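    /*
     * Plain KVM_IRQ_LINE reports no delivery status, so assume success;
     * with KVM_IRQ_LINE_STATUS the kernel fills in event.status.
     */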
1718     return (s->irq_set_ioctl == KVM_IRQ_LINE) ? 1 : event.status;
1719 }
1720 
1721 #ifdef KVM_CAP_IRQ_ROUTING
1722 typedef struct KVMMSIRoute {
1723     struct kvm_irq_routing_entry kroute;
1724     QTAILQ_ENTRY(KVMMSIRoute) entry;
1725 } KVMMSIRoute;
1726 
1727 static void set_gsi(KVMState *s, unsigned int gsi)
1728 {
1729     set_bit(gsi, s->used_gsi_bitmap);
1730 }
1731 
1732 static void clear_gsi(KVMState *s, unsigned int gsi)
1733 {
1734     clear_bit(gsi, s->used_gsi_bitmap);
1735 }
1736 
1737 void kvm_init_irq_routing(KVMState *s)
1738 {
1739     int gsi_count, i;
1740 
1741     gsi_count = kvm_check_extension(s, KVM_CAP_IRQ_ROUTING) - 1;
1742     if (gsi_count > 0) {
1743         /* Track used GSIs in a bitmap of gsi_count bits */
1744         s->used_gsi_bitmap = bitmap_new(gsi_count);
1745         s->gsi_count = gsi_count;
1746     }
1747 
1748     s->irq_routes = g_malloc0(sizeof(*s->irq_routes));
1749     s->nr_allocated_irq_routes = 0;
1750 
1751     if (!kvm_direct_msi_allowed) {
1752         for (i = 0; i < KVM_MSI_HASHTAB_SIZE; i++) {
1753             QTAILQ_INIT(&s->msi_hashtab[i]);
1754         }
1755     }
1756 
1757     kvm_arch_init_irq_routing(s);
1758 }
1759 
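/*
 * Routing table updates are batched: kvm_add_routing_entry() and
 * kvm_update_routing_entry() only modify the cached s->irq_routes table;
 * nothing reaches the kernel until this function pushes the whole table
 * with KVM_SET_GSI_ROUTING.  Callers that add several routes in a row are
 * expected to commit once at the end.
 */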
1760 void kvm_irqchip_commit_routes(KVMState *s)
1761 {
1762     int ret;
1763 
1764     if (kvm_gsi_direct_mapping()) {
1765         return;
1766     }
1767 
1768     if (!kvm_gsi_routing_enabled()) {
1769         return;
1770     }
1771 
1772     s->irq_routes->flags = 0;
1773     trace_kvm_irqchip_commit_routes();
1774     ret = kvm_vm_ioctl(s, KVM_SET_GSI_ROUTING, s->irq_routes);
1775     assert(ret == 0);
1776 }
1777 
1778 static void kvm_add_routing_entry(KVMState *s,
1779                                   struct kvm_irq_routing_entry *entry)
1780 {
1781     struct kvm_irq_routing_entry *new;
1782     int n, size;
1783 
1784     if (s->irq_routes->nr == s->nr_allocated_irq_routes) {
1785         n = s->nr_allocated_irq_routes * 2;
1786         if (n < 64) {
1787             n = 64;
1788         }
1789         size = sizeof(struct kvm_irq_routing);
1790         size += n * sizeof(*new);
1791         s->irq_routes = g_realloc(s->irq_routes, size);
1792         s->nr_allocated_irq_routes = n;
1793     }
1794     n = s->irq_routes->nr++;
1795     new = &s->irq_routes->entries[n];
1796 
1797     *new = *entry;
1798 
1799     set_gsi(s, entry->gsi);
1800 }
1801 
1802 static int kvm_update_routing_entry(KVMState *s,
1803                                     struct kvm_irq_routing_entry *new_entry)
1804 {
1805     struct kvm_irq_routing_entry *entry;
1806     int n;
1807 
1808     for (n = 0; n < s->irq_routes->nr; n++) {
1809         entry = &s->irq_routes->entries[n];
1810         if (entry->gsi != new_entry->gsi) {
1811             continue;
1812         }
1813 
1814         if (!memcmp(entry, new_entry, sizeof(*entry))) {
1815             return 0;
1816         }
1817 
1818         *entry = *new_entry;
1819 
1820         return 0;
1821     }
1822 
1823     return -ESRCH;
1824 }
1825 
1826 void kvm_irqchip_add_irq_route(KVMState *s, int irq, int irqchip, int pin)
1827 {
1828     struct kvm_irq_routing_entry e = {};
1829 
1830     assert(pin < s->gsi_count);
1831 
1832     e.gsi = irq;
1833     e.type = KVM_IRQ_ROUTING_IRQCHIP;
1834     e.flags = 0;
1835     e.u.irqchip.irqchip = irqchip;
1836     e.u.irqchip.pin = pin;
1837     kvm_add_routing_entry(s, &e);
1838 }
1839 
1840 void kvm_irqchip_release_virq(KVMState *s, int virq)
1841 {
1842     struct kvm_irq_routing_entry *e;
1843     int i;
1844 
1845     if (kvm_gsi_direct_mapping()) {
1846         return;
1847     }
1848 
1849     for (i = 0; i < s->irq_routes->nr; i++) {
1850         e = &s->irq_routes->entries[i];
1851         if (e->gsi == virq) {
1852             s->irq_routes->nr--;
1853             *e = s->irq_routes->entries[s->irq_routes->nr];
1854         }
1855     }
1856     clear_gsi(s, virq);
1857     kvm_arch_release_virq_post(virq);
1858     trace_kvm_irqchip_release_virq(virq);
1859 }
1860 
1861 void kvm_irqchip_add_change_notifier(Notifier *n)
1862 {
1863     notifier_list_add(&kvm_irqchip_change_notifiers, n);
1864 }
1865 
1866 void kvm_irqchip_remove_change_notifier(Notifier *n)
1867 {
1868     notifier_remove(n);
1869 }
1870 
1871 void kvm_irqchip_change_notify(void)
1872 {
1873     notifier_list_notify(&kvm_irqchip_change_notifiers, NULL);
1874 }
1875 
1876 static unsigned int kvm_hash_msi(uint32_t data)
1877 {
1878     /* This is optimized for IA32 MSI layout. However, no other arch shall
1879      * repeat the mistake of not providing a direct MSI injection API. */
1880     return data & 0xff;
1881 }
1882 
1883 static void kvm_flush_dynamic_msi_routes(KVMState *s)
1884 {
1885     KVMMSIRoute *route, *next;
1886     unsigned int hash;
1887 
1888     for (hash = 0; hash < KVM_MSI_HASHTAB_SIZE; hash++) {
1889         QTAILQ_FOREACH_SAFE(route, &s->msi_hashtab[hash], entry, next) {
1890             kvm_irqchip_release_virq(s, route->kroute.gsi);
1891             QTAILQ_REMOVE(&s->msi_hashtab[hash], route, entry);
1892             g_free(route);
1893         }
1894     }
1895 }
1896 
1897 static int kvm_irqchip_get_virq(KVMState *s)
1898 {
1899     int next_virq;
1900 
1901     /*
1902      * The PIC and the IOAPIC share the first 16 GSI numbers, so there are
1903      * more available GSI numbers than IRQ route entries. Allocating a GSI
1904      * number can thus succeed even though a new route entry cannot be added.
1905      * When this happens, flush dynamic MSI entries to free IRQ route entries.
1906      */
1907     if (!kvm_direct_msi_allowed && s->irq_routes->nr == s->gsi_count) {
1908         kvm_flush_dynamic_msi_routes(s);
1909     }
1910 
1911     /* Return the lowest unused GSI in the bitmap */
1912     next_virq = find_first_zero_bit(s->used_gsi_bitmap, s->gsi_count);
1913     if (next_virq >= s->gsi_count) {
1914         return -ENOSPC;
1915     } else {
1916         return next_virq;
1917     }
1918 }
1919 
1920 static KVMMSIRoute *kvm_lookup_msi_route(KVMState *s, MSIMessage msg)
1921 {
1922     unsigned int hash = kvm_hash_msi(msg.data);
1923     KVMMSIRoute *route;
1924 
1925     QTAILQ_FOREACH(route, &s->msi_hashtab[hash], entry) {
1926         if (route->kroute.u.msi.address_lo == (uint32_t)msg.address &&
1927             route->kroute.u.msi.address_hi == (msg.address >> 32) &&
1928             route->kroute.u.msi.data == le32_to_cpu(msg.data)) {
1929             return route;
1930         }
1931     }
1932     return NULL;
1933 }
1934 
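/*
 * Deliver an MSI.  If the kernel supports KVM_SIGNAL_MSI the message is
 * injected directly.  Otherwise a dynamic MSI routing entry is allocated
 * (and cached in msi_hashtab, keyed by the low byte of msg.data) and the
 * corresponding GSI is raised via kvm_set_irq().
 */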
1935 int kvm_irqchip_send_msi(KVMState *s, MSIMessage msg)
1936 {
1937     struct kvm_msi msi;
1938     KVMMSIRoute *route;
1939 
1940     if (kvm_direct_msi_allowed) {
1941         msi.address_lo = (uint32_t)msg.address;
1942         msi.address_hi = msg.address >> 32;
1943         msi.data = le32_to_cpu(msg.data);
1944         msi.flags = 0;
1945         memset(msi.pad, 0, sizeof(msi.pad));
1946 
1947         return kvm_vm_ioctl(s, KVM_SIGNAL_MSI, &msi);
1948     }
1949 
1950     route = kvm_lookup_msi_route(s, msg);
1951     if (!route) {
1952         int virq;
1953 
1954         virq = kvm_irqchip_get_virq(s);
1955         if (virq < 0) {
1956             return virq;
1957         }
1958 
1959         route = g_new0(KVMMSIRoute, 1);
1960         route->kroute.gsi = virq;
1961         route->kroute.type = KVM_IRQ_ROUTING_MSI;
1962         route->kroute.flags = 0;
1963         route->kroute.u.msi.address_lo = (uint32_t)msg.address;
1964         route->kroute.u.msi.address_hi = msg.address >> 32;
1965         route->kroute.u.msi.data = le32_to_cpu(msg.data);
1966 
1967         kvm_add_routing_entry(s, &route->kroute);
1968         kvm_irqchip_commit_routes(s);
1969 
1970         QTAILQ_INSERT_TAIL(&s->msi_hashtab[kvm_hash_msi(msg.data)], route,
1971                            entry);
1972     }
1973 
1974     assert(route->kroute.type == KVM_IRQ_ROUTING_MSI);
1975 
1976     return kvm_set_irq(s, route->kroute.gsi, 1);
1977 }
1978 
1979 int kvm_irqchip_add_msi_route(KVMRouteChange *c, int vector, PCIDevice *dev)
1980 {
1981     struct kvm_irq_routing_entry kroute = {};
1982     int virq;
1983     KVMState *s = c->s;
1984     MSIMessage msg = {0, 0};
1985 
1986     if (pci_available && dev) {
1987         msg = pci_get_msi_message(dev, vector);
1988     }
1989 
1990     if (kvm_gsi_direct_mapping()) {
1991         return kvm_arch_msi_data_to_gsi(msg.data);
1992     }
1993 
1994     if (!kvm_gsi_routing_enabled()) {
1995         return -ENOSYS;
1996     }
1997 
1998     virq = kvm_irqchip_get_virq(s);
1999     if (virq < 0) {
2000         return virq;
2001     }
2002 
2003     kroute.gsi = virq;
2004     kroute.type = KVM_IRQ_ROUTING_MSI;
2005     kroute.flags = 0;
2006     kroute.u.msi.address_lo = (uint32_t)msg.address;
2007     kroute.u.msi.address_hi = msg.address >> 32;
2008     kroute.u.msi.data = le32_to_cpu(msg.data);
2009     if (pci_available && kvm_msi_devid_required()) {
2010         kroute.flags = KVM_MSI_VALID_DEVID;
2011         kroute.u.msi.devid = pci_requester_id(dev);
2012     }
2013     if (kvm_arch_fixup_msi_route(&kroute, msg.address, msg.data, dev)) {
2014         kvm_irqchip_release_virq(s, virq);
2015         return -EINVAL;
2016     }
2017 
2018     trace_kvm_irqchip_add_msi_route(dev ? dev->name : (char *)"N/A",
2019                                     vector, virq);
2020 
2021     kvm_add_routing_entry(s, &kroute);
2022     kvm_arch_add_msi_route_post(&kroute, vector, dev);
2023     c->changes++;
2024 
2025     return virq;
2026 }
2027 
2028 int kvm_irqchip_update_msi_route(KVMState *s, int virq, MSIMessage msg,
2029                                  PCIDevice *dev)
2030 {
2031     struct kvm_irq_routing_entry kroute = {};
2032 
2033     if (kvm_gsi_direct_mapping()) {
2034         return 0;
2035     }
2036 
2037     if (!kvm_irqchip_in_kernel()) {
2038         return -ENOSYS;
2039     }
2040 
2041     kroute.gsi = virq;
2042     kroute.type = KVM_IRQ_ROUTING_MSI;
2043     kroute.flags = 0;
2044     kroute.u.msi.address_lo = (uint32_t)msg.address;
2045     kroute.u.msi.address_hi = msg.address >> 32;
2046     kroute.u.msi.data = le32_to_cpu(msg.data);
2047     if (pci_available && kvm_msi_devid_required()) {
2048         kroute.flags = KVM_MSI_VALID_DEVID;
2049         kroute.u.msi.devid = pci_requester_id(dev);
2050     }
2051     if (kvm_arch_fixup_msi_route(&kroute, msg.address, msg.data, dev)) {
2052         return -EINVAL;
2053     }
2054 
2055     trace_kvm_irqchip_update_msi_route(virq);
2056 
2057     return kvm_update_routing_entry(s, &kroute);
2058 }
2059 
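/*
 * (De)assign an irqfd: KVM_IRQFD ties an eventfd to a GSI so that
 * signalling the eventfd injects the interrupt entirely in the kernel,
 * without a trip through userspace.  The optional resamplefd is the
 * level-triggered variant: it is notified on EOI so the device model can
 * re-raise the line if it is still asserted.
 */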
2060 static int kvm_irqchip_assign_irqfd(KVMState *s, EventNotifier *event,
2061                                     EventNotifier *resample, int virq,
2062                                     bool assign)
2063 {
2064     int fd = event_notifier_get_fd(event);
2065     int rfd = resample ? event_notifier_get_fd(resample) : -1;
2066 
2067     struct kvm_irqfd irqfd = {
2068         .fd = fd,
2069         .gsi = virq,
2070         .flags = assign ? 0 : KVM_IRQFD_FLAG_DEASSIGN,
2071     };
2072 
2073     if (rfd != -1) {
2074         assert(assign);
2075         if (kvm_irqchip_is_split()) {
2076             /*
2077              * When the slow irqchip (e.g. IOAPIC) is in the
2078              * userspace, KVM kernel resamplefd will not work because
2079              * the EOI of the interrupt will be delivered to userspace
2080              * instead, so the KVM kernel resamplefd kick will be
2081              * skipped.  The userspace here mimics what the kernel
2082              * provides with resamplefd: remember the resamplefd and
2083              * kick it when we receive the EOI of this IRQ.
2084              *
2085              * This is hackery because IOAPIC is mostly bypassed
2086              * (except EOI broadcasts) when irqfd is used.  However
2087              * this can bring much performance back for split irqchip
2088              * with INTx IRQs (for VFIO, this gives 93% perf of the
2089              * full fast path, which is a 46% perf boost compared to
2090              * the INTx slow path).
2091              */
2092             kvm_resample_fd_insert(virq, resample);
2093         } else {
2094             irqfd.flags |= KVM_IRQFD_FLAG_RESAMPLE;
2095             irqfd.resamplefd = rfd;
2096         }
2097     } else if (!assign) {
2098         if (kvm_irqchip_is_split()) {
2099             kvm_resample_fd_remove(virq);
2100         }
2101     }
2102 
2103     if (!kvm_irqfds_enabled()) {
2104         return -ENOSYS;
2105     }
2106 
2107     return kvm_vm_ioctl(s, KVM_IRQFD, &irqfd);
2108 }
2109 
2110 int kvm_irqchip_add_adapter_route(KVMState *s, AdapterInfo *adapter)
2111 {
2112     struct kvm_irq_routing_entry kroute = {};
2113     int virq;
2114 
2115     if (!kvm_gsi_routing_enabled()) {
2116         return -ENOSYS;
2117     }
2118 
2119     virq = kvm_irqchip_get_virq(s);
2120     if (virq < 0) {
2121         return virq;
2122     }
2123 
2124     kroute.gsi = virq;
2125     kroute.type = KVM_IRQ_ROUTING_S390_ADAPTER;
2126     kroute.flags = 0;
2127     kroute.u.adapter.summary_addr = adapter->summary_addr;
2128     kroute.u.adapter.ind_addr = adapter->ind_addr;
2129     kroute.u.adapter.summary_offset = adapter->summary_offset;
2130     kroute.u.adapter.ind_offset = adapter->ind_offset;
2131     kroute.u.adapter.adapter_id = adapter->adapter_id;
2132 
2133     kvm_add_routing_entry(s, &kroute);
2134 
2135     return virq;
2136 }
2137 
2138 int kvm_irqchip_add_hv_sint_route(KVMState *s, uint32_t vcpu, uint32_t sint)
2139 {
2140     struct kvm_irq_routing_entry kroute = {};
2141     int virq;
2142 
2143     if (!kvm_gsi_routing_enabled()) {
2144         return -ENOSYS;
2145     }
2146     if (!kvm_check_extension(s, KVM_CAP_HYPERV_SYNIC)) {
2147         return -ENOSYS;
2148     }
2149     virq = kvm_irqchip_get_virq(s);
2150     if (virq < 0) {
2151         return virq;
2152     }
2153 
2154     kroute.gsi = virq;
2155     kroute.type = KVM_IRQ_ROUTING_HV_SINT;
2156     kroute.flags = 0;
2157     kroute.u.hv_sint.vcpu = vcpu;
2158     kroute.u.hv_sint.sint = sint;
2159 
2160     kvm_add_routing_entry(s, &kroute);
2161     kvm_irqchip_commit_routes(s);
2162 
2163     return virq;
2164 }
2165 
2166 #else /* !KVM_CAP_IRQ_ROUTING */
2167 
2168 void kvm_init_irq_routing(KVMState *s)
2169 {
2170 }
2171 
2172 void kvm_irqchip_release_virq(KVMState *s, int virq)
2173 {
2174 }
2175 
2176 int kvm_irqchip_send_msi(KVMState *s, MSIMessage msg)
2177 {
2178     abort();
2179 }
2180 
2181 int kvm_irqchip_add_msi_route(KVMRouteChange *c, int vector, PCIDevice *dev)
2182 {
2183     return -ENOSYS;
2184 }
2185 
2186 int kvm_irqchip_add_adapter_route(KVMState *s, AdapterInfo *adapter)
2187 {
2188     return -ENOSYS;
2189 }
2190 
2191 int kvm_irqchip_add_hv_sint_route(KVMState *s, uint32_t vcpu, uint32_t sint)
2192 {
2193     return -ENOSYS;
2194 }
2195 
2196 static int kvm_irqchip_assign_irqfd(KVMState *s, EventNotifier *event,
2197                                     EventNotifier *resample, int virq,
2198                                     bool assign)
2199 {
2200     abort();
2201 }
2202 
2203 int kvm_irqchip_update_msi_route(KVMState *s, int virq, MSIMessage msg,
                                      PCIDevice *dev)
2204 {
2205     return -ENOSYS;
2206 }
2207 #endif /* !KVM_CAP_IRQ_ROUTING */
2208 
2209 int kvm_irqchip_add_irqfd_notifier_gsi(KVMState *s, EventNotifier *n,
2210                                        EventNotifier *rn, int virq)
2211 {
2212     return kvm_irqchip_assign_irqfd(s, n, rn, virq, true);
2213 }
2214 
2215 int kvm_irqchip_remove_irqfd_notifier_gsi(KVMState *s, EventNotifier *n,
2216                                           int virq)
2217 {
2218     return kvm_irqchip_assign_irqfd(s, n, NULL, virq, false);
2219 }
2220 
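/*
 * The qemu_irq based variants below translate a board-level qemu_irq into
 * its GSI through s->gsimap (populated by kvm_irqchip_set_qemuirq_gsi())
 * and then fall through to the GSI based helpers above.
 */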
2221 int kvm_irqchip_add_irqfd_notifier(KVMState *s, EventNotifier *n,
2222                                    EventNotifier *rn, qemu_irq irq)
2223 {
2224     gpointer key, gsi;
2225     gboolean found = g_hash_table_lookup_extended(s->gsimap, irq, &key, &gsi);
2226 
2227     if (!found) {
2228         return -ENXIO;
2229     }
2230     return kvm_irqchip_add_irqfd_notifier_gsi(s, n, rn, GPOINTER_TO_INT(gsi));
2231 }
2232 
2233 int kvm_irqchip_remove_irqfd_notifier(KVMState *s, EventNotifier *n,
2234                                       qemu_irq irq)
2235 {
2236     gpointer key, gsi;
2237     gboolean found = g_hash_table_lookup_extended(s->gsimap, irq, &key, &gsi);
2238 
2239     if (!found) {
2240         return -ENXIO;
2241     }
2242     return kvm_irqchip_remove_irqfd_notifier_gsi(s, n, GPOINTER_TO_INT(gsi));
2243 }
2244 
2245 void kvm_irqchip_set_qemuirq_gsi(KVMState *s, qemu_irq irq, int gsi)
2246 {
2247     g_hash_table_insert(s->gsimap, irq, GINT_TO_POINTER(gsi));
2248 }
2249 
2250 static void kvm_irqchip_create(KVMState *s)
2251 {
2252     int ret;
2253 
2254     assert(s->kernel_irqchip_split != ON_OFF_AUTO_AUTO);
2255     if (kvm_check_extension(s, KVM_CAP_IRQCHIP)) {
2256         ;
2257     } else if (kvm_check_extension(s, KVM_CAP_S390_IRQCHIP)) {
2258         ret = kvm_vm_enable_cap(s, KVM_CAP_S390_IRQCHIP, 0);
2259         if (ret < 0) {
2260             fprintf(stderr, "Enable kernel irqchip failed: %s\n", strerror(-ret));
2261             exit(1);
2262         }
2263     } else {
2264         return;
2265     }
2266 
2267     /* First probe and see if there's an arch-specific hook to create the
2268      * in-kernel irqchip for us */
2269     ret = kvm_arch_irqchip_create(s);
2270     if (ret == 0) {
2271         if (s->kernel_irqchip_split == ON_OFF_AUTO_ON) {
2272             error_report("Split IRQ chip mode not supported.");
2273             exit(1);
2274         } else {
2275             ret = kvm_vm_ioctl(s, KVM_CREATE_IRQCHIP);
2276         }
2277     }
2278     if (ret < 0) {
2279         fprintf(stderr, "Create kernel irqchip failed: %s\n", strerror(-ret));
2280         exit(1);
2281     }
2282 
2283     kvm_kernel_irqchip = true;
2284     /* If we have an in-kernel IRQ chip then we must have asynchronous
2285      * interrupt delivery (though the reverse is not necessarily true)
2286      */
2287     kvm_async_interrupts_allowed = true;
2288     kvm_halt_in_kernel_allowed = true;
2289 
2290     kvm_init_irq_routing(s);
2291 
2292     s->gsimap = g_hash_table_new(g_direct_hash, g_direct_equal);
2293 }
2294 
2295 /* Find number of supported CPUs using the recommended
2296  * procedure from the kernel API documentation to cope with
2297  * older kernels that may be missing capabilities.
2298  */
2299 static int kvm_recommended_vcpus(KVMState *s)
2300 {
2301     int ret = kvm_vm_check_extension(s, KVM_CAP_NR_VCPUS);
2302     return (ret) ? ret : 4;
2303 }
2304 
2305 static int kvm_max_vcpus(KVMState *s)
2306 {
2307     int ret = kvm_check_extension(s, KVM_CAP_MAX_VCPUS);
2308     return (ret) ? ret : kvm_recommended_vcpus(s);
2309 }
2310 
2311 static int kvm_max_vcpu_id(KVMState *s)
2312 {
2313     int ret = kvm_check_extension(s, KVM_CAP_MAX_VCPU_ID);
2314     return (ret) ? ret : kvm_max_vcpus(s);
2315 }
2316 
2317 bool kvm_vcpu_id_is_valid(int vcpu_id)
2318 {
2319     KVMState *s = KVM_STATE(current_accel());
2320     return vcpu_id >= 0 && vcpu_id < kvm_max_vcpu_id(s);
2321 }
2322 
2323 bool kvm_dirty_ring_enabled(void)
2324 {
2325     return kvm_state->kvm_dirty_ring_size ? true : false;
2326 }
2327 
2328 static void query_stats_cb(StatsResultList **result, StatsTarget target,
2329                            strList *names, strList *targets, Error **errp);
2330 static void query_stats_schemas_cb(StatsSchemaList **result, Error **errp);
2331 
2332 uint32_t kvm_dirty_ring_size(void)
2333 {
2334     return kvm_state->kvm_dirty_ring_size;
2335 }
2336 
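/*
 * Accelerator bring-up: open /dev/kvm, sanity-check the API version,
 * create the VM with KVM_CREATE_VM, probe the capabilities this file
 * relies on (memslots, dirty logging / dirty ring, ioeventfd, irqfd, ...)
 * and register the memory listeners that mirror guest RAM into KVM
 * memory slots.
 */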
2337 static int kvm_init(MachineState *ms)
2338 {
2339     MachineClass *mc = MACHINE_GET_CLASS(ms);
2340     static const char upgrade_note[] =
2341         "Please upgrade to at least kernel 2.6.29 or recent kvm-kmod\n"
2342         "(see http://sourceforge.net/projects/kvm).\n";
2343     struct {
2344         const char *name;
2345         int num;
2346     } num_cpus[] = {
2347         { "SMP",          ms->smp.cpus },
2348         { "hotpluggable", ms->smp.max_cpus },
2349         { NULL, }
2350     }, *nc = num_cpus;
2351     int soft_vcpus_limit, hard_vcpus_limit;
2352     KVMState *s;
2353     const KVMCapabilityInfo *missing_cap;
2354     int ret;
2355     int type = 0;
2356     uint64_t dirty_log_manual_caps;
2357 
2358     qemu_mutex_init(&kml_slots_lock);
2359 
2360     s = KVM_STATE(ms->accelerator);
2361 
2362     /*
2363      * On systems where the kernel can support different base page
2364      * sizes, host page size may be different from TARGET_PAGE_SIZE,
2365      * even with KVM.  TARGET_PAGE_SIZE is assumed to be the minimum
2366      * page size for the system though.
2367      */
2368     assert(TARGET_PAGE_SIZE <= qemu_real_host_page_size());
2369 
2370     s->sigmask_len = 8;
2371 
2372 #ifdef KVM_CAP_SET_GUEST_DEBUG
2373     QTAILQ_INIT(&s->kvm_sw_breakpoints);
2374 #endif
2375     QLIST_INIT(&s->kvm_parked_vcpus);
2376     s->fd = qemu_open_old("/dev/kvm", O_RDWR);
2377     if (s->fd == -1) {
2378         fprintf(stderr, "Could not access KVM kernel module: %m\n");
2379         ret = -errno;
2380         goto err;
2381     }
2382 
2383     ret = kvm_ioctl(s, KVM_GET_API_VERSION, 0);
2384     if (ret < KVM_API_VERSION) {
2385         if (ret >= 0) {
2386             ret = -EINVAL;
2387         }
2388         fprintf(stderr, "kvm version too old\n");
2389         goto err;
2390     }
2391 
2392     if (ret > KVM_API_VERSION) {
2393         ret = -EINVAL;
2394         fprintf(stderr, "kvm version not supported\n");
2395         goto err;
2396     }
2397 
2398     kvm_immediate_exit = kvm_check_extension(s, KVM_CAP_IMMEDIATE_EXIT);
2399     s->nr_slots = kvm_check_extension(s, KVM_CAP_NR_MEMSLOTS);
2400 
2401     /* If unspecified, use the default value */
2402     if (!s->nr_slots) {
2403         s->nr_slots = 32;
2404     }
2405 
2406     s->nr_as = kvm_check_extension(s, KVM_CAP_MULTI_ADDRESS_SPACE);
2407     if (s->nr_as <= 1) {
2408         s->nr_as = 1;
2409     }
2410     s->as = g_new0(struct KVMAs, s->nr_as);
2411 
2412     if (object_property_find(OBJECT(current_machine), "kvm-type")) {
2413         g_autofree char *kvm_type = object_property_get_str(OBJECT(current_machine),
2414                                                             "kvm-type",
2415                                                             &error_abort);
2416         type = mc->kvm_type(ms, kvm_type);
2417     } else if (mc->kvm_type) {
2418         type = mc->kvm_type(ms, NULL);
2419     }
2420 
2421     do {
2422         ret = kvm_ioctl(s, KVM_CREATE_VM, type);
2423     } while (ret == -EINTR);
2424 
2425     if (ret < 0) {
2426         fprintf(stderr, "ioctl(KVM_CREATE_VM) failed: %d %s\n", -ret,
2427                 strerror(-ret));
2428 
2429 #ifdef TARGET_S390X
2430         if (ret == -EINVAL) {
2431             fprintf(stderr,
2432                     "Host kernel setup problem detected. Please verify:\n");
2433             fprintf(stderr, "- for kernels supporting the switch_amode or"
2434                     " user_mode parameters, whether\n");
2435             fprintf(stderr,
2436                     "  user space is running in primary address space\n");
2437             fprintf(stderr,
2438                     "- for kernels supporting the vm.allocate_pgste sysctl, "
2439                     "whether it is enabled\n");
2440         }
2441 #elif defined(TARGET_PPC)
2442         if (ret == -EINVAL) {
2443             fprintf(stderr,
2444                     "PPC KVM module is not loaded. Try modprobe kvm_%s.\n",
2445                     (type == 2) ? "pr" : "hv");
2446         }
2447 #endif
2448         goto err;
2449     }
2450 
2451     s->vmfd = ret;
2452 
2453     /* check the vcpu limits */
2454     soft_vcpus_limit = kvm_recommended_vcpus(s);
2455     hard_vcpus_limit = kvm_max_vcpus(s);
2456 
2457     while (nc->name) {
2458         if (nc->num > soft_vcpus_limit) {
2459             warn_report("Number of %s cpus requested (%d) exceeds "
2460                         "the recommended cpus supported by KVM (%d)",
2461                         nc->name, nc->num, soft_vcpus_limit);
2462 
2463             if (nc->num > hard_vcpus_limit) {
2464                 fprintf(stderr, "Number of %s cpus requested (%d) exceeds "
2465                         "the maximum cpus supported by KVM (%d)\n",
2466                         nc->name, nc->num, hard_vcpus_limit);
2467                 exit(1);
2468             }
2469         }
2470         nc++;
2471     }
2472 
2473     missing_cap = kvm_check_extension_list(s, kvm_required_capabilites);
2474     if (!missing_cap) {
2475         missing_cap =
2476             kvm_check_extension_list(s, kvm_arch_required_capabilities);
2477     }
2478     if (missing_cap) {
2479         ret = -EINVAL;
2480         fprintf(stderr, "kvm does not support %s\n%s",
2481                 missing_cap->name, upgrade_note);
2482         goto err;
2483     }
2484 
2485     s->coalesced_mmio = kvm_check_extension(s, KVM_CAP_COALESCED_MMIO);
2486     s->coalesced_pio = s->coalesced_mmio &&
2487                        kvm_check_extension(s, KVM_CAP_COALESCED_PIO);
2488 
2489     /*
2490      * Enable KVM dirty ring if supported, otherwise fall back to
2491      * dirty logging mode
2492      */
2493     if (s->kvm_dirty_ring_size > 0) {
2494         uint64_t ring_bytes;
2495 
2496         ring_bytes = s->kvm_dirty_ring_size * sizeof(struct kvm_dirty_gfn);
2497 
2498         /* Read the max supported ring size, in bytes */
2499         ret = kvm_vm_check_extension(s, KVM_CAP_DIRTY_LOG_RING);
2500         if (ret > 0) {
2501             if (ring_bytes > ret) {
2502                 error_report("KVM dirty ring size %" PRIu32 " too big "
2503                              "(maximum is %ld).  Please use a smaller value.",
2504                              s->kvm_dirty_ring_size,
2505                              (long)ret / sizeof(struct kvm_dirty_gfn));
2506                 ret = -EINVAL;
2507                 goto err;
2508             }
2509 
2510             ret = kvm_vm_enable_cap(s, KVM_CAP_DIRTY_LOG_RING, 0, ring_bytes);
2511             if (ret) {
2512                 error_report("Enabling of KVM dirty ring failed: %s. "
2513                              "Suggested minimum value is 1024.", strerror(-ret));
2514                 goto err;
2515             }
2516 
2517             s->kvm_dirty_ring_bytes = ring_bytes;
2518         } else {
2519             warn_report("KVM dirty ring not available, using bitmap method");
2520             s->kvm_dirty_ring_size = 0;
2521         }
2522     }
2523 
2524     /*
2525      * KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 is not needed when dirty ring is
2526      * enabled.  More importantly, KVM_DIRTY_LOG_INITIALLY_SET will assume no
2527      * page is wr-protected initially, which conflicts with how the kvm dirty
2528      * ring is used - the dirty ring requires that all pages be wr-protected
2529      * at the very beginning.  Enabling this feature for the dirty ring causes
2530      * data corruption.
2530      *
2531      * TODO: Without KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 and kvm clear dirty log,
2532      * we may expect a higher stall time when starting the migration.  In the
2533      * future we can enable KVM_CLEAR_DIRTY_LOG to work with dirty ring too:
2534      * instead of clearing dirty bit, it can be a way to explicitly wr-protect
2535      * guest pages.
2536      */
2537     if (!s->kvm_dirty_ring_size) {
2538         dirty_log_manual_caps =
2539             kvm_check_extension(s, KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2);
2540         dirty_log_manual_caps &= (KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE |
2541                                   KVM_DIRTY_LOG_INITIALLY_SET);
2542         s->manual_dirty_log_protect = dirty_log_manual_caps;
2543         if (dirty_log_manual_caps) {
2544             ret = kvm_vm_enable_cap(s, KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2, 0,
2545                                     dirty_log_manual_caps);
2546             if (ret) {
2547                 warn_report("Failed to enable capability %"PRIu64" of "
2548                             "KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2. "
2549                             "Falling back to the legacy mode.",
2550                             dirty_log_manual_caps);
2551                 s->manual_dirty_log_protect = 0;
2552             }
2553         }
2554     }
2555 
2556 #ifdef KVM_CAP_VCPU_EVENTS
2557     s->vcpu_events = kvm_check_extension(s, KVM_CAP_VCPU_EVENTS);
2558 #endif
2559 
2560     s->robust_singlestep =
2561         kvm_check_extension(s, KVM_CAP_X86_ROBUST_SINGLESTEP);
2562 
2563 #ifdef KVM_CAP_DEBUGREGS
2564     s->debugregs = kvm_check_extension(s, KVM_CAP_DEBUGREGS);
2565 #endif
2566 
2567     s->max_nested_state_len = kvm_check_extension(s, KVM_CAP_NESTED_STATE);
2568 
2569 #ifdef KVM_CAP_IRQ_ROUTING
2570     kvm_direct_msi_allowed = (kvm_check_extension(s, KVM_CAP_SIGNAL_MSI) > 0);
2571 #endif
2572 
2573     s->intx_set_mask = kvm_check_extension(s, KVM_CAP_PCI_2_3);
2574 
2575     s->irq_set_ioctl = KVM_IRQ_LINE;
2576     if (kvm_check_extension(s, KVM_CAP_IRQ_INJECT_STATUS)) {
2577         s->irq_set_ioctl = KVM_IRQ_LINE_STATUS;
2578     }
2579 
2580     kvm_readonly_mem_allowed =
2581         (kvm_check_extension(s, KVM_CAP_READONLY_MEM) > 0);
2582 
2583     kvm_eventfds_allowed =
2584         (kvm_check_extension(s, KVM_CAP_IOEVENTFD) > 0);
2585 
2586     kvm_irqfds_allowed =
2587         (kvm_check_extension(s, KVM_CAP_IRQFD) > 0);
2588 
2589     kvm_resamplefds_allowed =
2590         (kvm_check_extension(s, KVM_CAP_IRQFD_RESAMPLE) > 0);
2591 
2592     kvm_vm_attributes_allowed =
2593         (kvm_check_extension(s, KVM_CAP_VM_ATTRIBUTES) > 0);
2594 
2595     kvm_ioeventfd_any_length_allowed =
2596         (kvm_check_extension(s, KVM_CAP_IOEVENTFD_ANY_LENGTH) > 0);
2597 
2598 #ifdef KVM_CAP_SET_GUEST_DEBUG
2599     kvm_has_guest_debug =
2600         (kvm_check_extension(s, KVM_CAP_SET_GUEST_DEBUG) > 0);
2601 #endif
2602 
2603     kvm_sstep_flags = 0;
2604     if (kvm_has_guest_debug) {
2605         kvm_sstep_flags = SSTEP_ENABLE;
2606 
2607 #if defined KVM_CAP_SET_GUEST_DEBUG2
2608         int guest_debug_flags =
2609             kvm_check_extension(s, KVM_CAP_SET_GUEST_DEBUG2);
2610 
2611         if (guest_debug_flags & KVM_GUESTDBG_BLOCKIRQ) {
2612             kvm_sstep_flags |= SSTEP_NOIRQ;
2613         }
2614 #endif
2615     }
2616 
2617     kvm_state = s;
2618 
2619     ret = kvm_arch_init(ms, s);
2620     if (ret < 0) {
2621         goto err;
2622     }
2623 
2624     if (s->kernel_irqchip_split == ON_OFF_AUTO_AUTO) {
2625         s->kernel_irqchip_split = mc->default_kernel_irqchip_split ? ON_OFF_AUTO_ON : ON_OFF_AUTO_OFF;
2626     }
2627 
2628     qemu_register_reset(kvm_unpoison_all, NULL);
2629 
2630     if (s->kernel_irqchip_allowed) {
2631         kvm_irqchip_create(s);
2632     }
2633 
2634     if (kvm_eventfds_allowed) {
2635         s->memory_listener.listener.eventfd_add = kvm_mem_ioeventfd_add;
2636         s->memory_listener.listener.eventfd_del = kvm_mem_ioeventfd_del;
2637     }
2638     s->memory_listener.listener.coalesced_io_add = kvm_coalesce_mmio_region;
2639     s->memory_listener.listener.coalesced_io_del = kvm_uncoalesce_mmio_region;
2640 
2641     kvm_memory_listener_register(s, &s->memory_listener,
2642                                  &address_space_memory, 0, "kvm-memory");
2643     if (kvm_eventfds_allowed) {
2644         memory_listener_register(&kvm_io_listener,
2645                                  &address_space_io);
2646     }
2647     memory_listener_register(&kvm_coalesced_pio_listener,
2648                              &address_space_io);
2649 
2650     s->many_ioeventfds = kvm_check_many_ioeventfds();
2651 
2652     s->sync_mmu = !!kvm_vm_check_extension(kvm_state, KVM_CAP_SYNC_MMU);
2653     if (!s->sync_mmu) {
2654         ret = ram_block_discard_disable(true);
2655         assert(!ret);
2656     }
2657 
2658     if (s->kvm_dirty_ring_size) {
2659         ret = kvm_dirty_ring_reaper_init(s);
2660         if (ret) {
2661             goto err;
2662         }
2663     }
2664 
2665     if (kvm_check_extension(kvm_state, KVM_CAP_BINARY_STATS_FD)) {
2666         add_stats_callbacks(STATS_PROVIDER_KVM, query_stats_cb,
2667                             query_stats_schemas_cb);
2668     }
2669 
2670     return 0;
2671 
2672 err:
2673     assert(ret < 0);
2674     if (s->vmfd >= 0) {
2675         close(s->vmfd);
2676     }
2677     if (s->fd != -1) {
2678         close(s->fd);
2679     }
2680     g_free(s->memory_listener.slots);
2681 
2682     return ret;
2683 }
2684 
2685 void kvm_set_sigmask_len(KVMState *s, unsigned int sigmask_len)
2686 {
2687     s->sigmask_len = sigmask_len;
2688 }
2689 
2690 static void kvm_handle_io(uint16_t port, MemTxAttrs attrs, void *data, int direction,
2691                           int size, uint32_t count)
2692 {
2693     int i;
2694     uint8_t *ptr = data;
2695 
2696     for (i = 0; i < count; i++) {
2697         address_space_rw(&address_space_io, port, attrs,
2698                          ptr, size,
2699                          direction == KVM_EXIT_IO_OUT);
2700         ptr += size;
2701     }
2702 }
2703 
2704 static int kvm_handle_internal_error(CPUState *cpu, struct kvm_run *run)
2705 {
2706     fprintf(stderr, "KVM internal error. Suberror: %d\n",
2707             run->internal.suberror);
2708 
2709     if (kvm_check_extension(kvm_state, KVM_CAP_INTERNAL_ERROR_DATA)) {
2710         int i;
2711 
2712         for (i = 0; i < run->internal.ndata; ++i) {
2713             fprintf(stderr, "extra data[%d]: 0x%016"PRIx64"\n",
2714                     i, (uint64_t)run->internal.data[i]);
2715         }
2716     }
2717     if (run->internal.suberror == KVM_INTERNAL_ERROR_EMULATION) {
2718         fprintf(stderr, "emulation failure\n");
2719         if (!kvm_arch_stop_on_emulation_error(cpu)) {
2720             cpu_dump_state(cpu, stderr, CPU_DUMP_CODE);
2721             return EXCP_INTERRUPT;
2722         }
2723     }
2724     /* FIXME: Should trigger a qmp message to let management know
2725      * something went wrong.
2726      */
2727     return -1;
2728 }
2729 
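/*
 * Writes to coalesced MMIO/PIO regions are not delivered one by one;
 * KVM records them in a ring shared with userspace and we replay them
 * here, in order, before any access that might depend on them.
 */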
2730 void kvm_flush_coalesced_mmio_buffer(void)
2731 {
2732     KVMState *s = kvm_state;
2733 
2734     if (s->coalesced_flush_in_progress) {
2735         return;
2736     }
2737 
2738     s->coalesced_flush_in_progress = true;
2739 
2740     if (s->coalesced_mmio_ring) {
2741         struct kvm_coalesced_mmio_ring *ring = s->coalesced_mmio_ring;
2742         while (ring->first != ring->last) {
2743             struct kvm_coalesced_mmio *ent;
2744 
2745             ent = &ring->coalesced_mmio[ring->first];
2746 
2747             if (ent->pio == 1) {
2748                 address_space_write(&address_space_io, ent->phys_addr,
2749                                     MEMTXATTRS_UNSPECIFIED, ent->data,
2750                                     ent->len);
2751             } else {
2752                 cpu_physical_memory_write(ent->phys_addr, ent->data, ent->len);
2753             }
2754             smp_wmb();
2755             ring->first = (ring->first + 1) % KVM_COALESCED_MMIO_MAX;
2756         }
2757     }
2758 
2759     s->coalesced_flush_in_progress = false;
2760 }
2761 
2762 bool kvm_cpu_check_are_resettable(void)
2763 {
2764     return kvm_arch_cpu_check_are_resettable();
2765 }
2766 
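/*
 * Register synchronization helpers.  cpu->vcpu_dirty tracks which side
 * holds the authoritative register state: when it is set, QEMU's copy is
 * newer and is written back with kvm_arch_put_registers() before the next
 * KVM_RUN (see kvm_cpu_exec()); when it is clear, the kernel's copy is
 * current and must be fetched before QEMU inspects it.
 */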
2767 static void do_kvm_cpu_synchronize_state(CPUState *cpu, run_on_cpu_data arg)
2768 {
2769     if (!cpu->vcpu_dirty) {
2770         kvm_arch_get_registers(cpu);
2771         cpu->vcpu_dirty = true;
2772     }
2773 }
2774 
2775 void kvm_cpu_synchronize_state(CPUState *cpu)
2776 {
2777     if (!cpu->vcpu_dirty) {
2778         run_on_cpu(cpu, do_kvm_cpu_synchronize_state, RUN_ON_CPU_NULL);
2779     }
2780 }
2781 
2782 static void do_kvm_cpu_synchronize_post_reset(CPUState *cpu, run_on_cpu_data arg)
2783 {
2784     kvm_arch_put_registers(cpu, KVM_PUT_RESET_STATE);
2785     cpu->vcpu_dirty = false;
2786 }
2787 
2788 void kvm_cpu_synchronize_post_reset(CPUState *cpu)
2789 {
2790     run_on_cpu(cpu, do_kvm_cpu_synchronize_post_reset, RUN_ON_CPU_NULL);
2791 }
2792 
2793 static void do_kvm_cpu_synchronize_post_init(CPUState *cpu, run_on_cpu_data arg)
2794 {
2795     kvm_arch_put_registers(cpu, KVM_PUT_FULL_STATE);
2796     cpu->vcpu_dirty = false;
2797 }
2798 
2799 void kvm_cpu_synchronize_post_init(CPUState *cpu)
2800 {
2801     run_on_cpu(cpu, do_kvm_cpu_synchronize_post_init, RUN_ON_CPU_NULL);
2802 }
2803 
2804 static void do_kvm_cpu_synchronize_pre_loadvm(CPUState *cpu, run_on_cpu_data arg)
2805 {
2806     cpu->vcpu_dirty = true;
2807 }
2808 
2809 void kvm_cpu_synchronize_pre_loadvm(CPUState *cpu)
2810 {
2811     run_on_cpu(cpu, do_kvm_cpu_synchronize_pre_loadvm, RUN_ON_CPU_NULL);
2812 }
2813 
2814 #ifdef KVM_HAVE_MCE_INJECTION
2815 static __thread void *pending_sigbus_addr;
2816 static __thread int pending_sigbus_code;
2817 static __thread bool have_sigbus_pending;
2818 #endif
2819 
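/*
 * Kicking a vCPU out of KVM_RUN: run->immediate_exit makes the next
 * KVM_RUN return -EINTR straight away, closing the race where a kick
 * arrives just before the ioctl is (re)entered.  On kernels without
 * KVM_CAP_IMMEDIATE_EXIT, SIG_IPI stays blocked in the thread and is only
 * unblocked while inside KVM_RUN (via KVM_SET_SIGNAL_MASK), so the signal
 * interrupts the ioctl and any leftover pending signal is drained by
 * kvm_eat_signals() below.
 */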
2820 static void kvm_cpu_kick(CPUState *cpu)
2821 {
2822     qatomic_set(&cpu->kvm_run->immediate_exit, 1);
2823 }
2824 
2825 static void kvm_cpu_kick_self(void)
2826 {
2827     if (kvm_immediate_exit) {
2828         kvm_cpu_kick(current_cpu);
2829     } else {
2830         qemu_cpu_kick_self();
2831     }
2832 }
2833 
2834 static void kvm_eat_signals(CPUState *cpu)
2835 {
2836     struct timespec ts = { 0, 0 };
2837     siginfo_t siginfo;
2838     sigset_t waitset;
2839     sigset_t chkset;
2840     int r;
2841 
2842     if (kvm_immediate_exit) {
2843         qatomic_set(&cpu->kvm_run->immediate_exit, 0);
2844         /* Write kvm_run->immediate_exit before the cpu->exit_request
2845          * write in kvm_cpu_exec.
2846          */
2847         smp_wmb();
2848         return;
2849     }
2850 
2851     sigemptyset(&waitset);
2852     sigaddset(&waitset, SIG_IPI);
2853 
2854     do {
2855         r = sigtimedwait(&waitset, &siginfo, &ts);
2856         if (r == -1 && !(errno == EAGAIN || errno == EINTR)) {
2857             perror("sigtimedwait");
2858             exit(1);
2859         }
2860 
2861         r = sigpending(&chkset);
2862         if (r == -1) {
2863             perror("sigpending");
2864             exit(1);
2865         }
2866     } while (sigismember(&chkset, SIG_IPI));
2867 }
2868 
2869 int kvm_cpu_exec(CPUState *cpu)
2870 {
2871     struct kvm_run *run = cpu->kvm_run;
2872     int ret, run_ret;
2873 
2874     DPRINTF("kvm_cpu_exec()\n");
2875 
2876     if (kvm_arch_process_async_events(cpu)) {
2877         qatomic_set(&cpu->exit_request, 0);
2878         return EXCP_HLT;
2879     }
2880 
2881     qemu_mutex_unlock_iothread();
2882     cpu_exec_start(cpu);
2883 
2884     do {
2885         MemTxAttrs attrs;
2886 
2887         if (cpu->vcpu_dirty) {
2888             kvm_arch_put_registers(cpu, KVM_PUT_RUNTIME_STATE);
2889             cpu->vcpu_dirty = false;
2890         }
2891 
2892         kvm_arch_pre_run(cpu, run);
2893         if (qatomic_read(&cpu->exit_request)) {
2894             DPRINTF("interrupt exit requested\n");
2895             /*
2896              * KVM requires us to reenter the kernel after IO exits to complete
2897              * instruction emulation. This self-signal will ensure that we
2898              * leave ASAP again.
2899              */
2900             kvm_cpu_kick_self();
2901         }
2902 
2903         /* Read cpu->exit_request before KVM_RUN reads run->immediate_exit.
2904          * Matching barrier in kvm_eat_signals.
2905          */
2906         smp_rmb();
2907 
2908         run_ret = kvm_vcpu_ioctl(cpu, KVM_RUN, 0);
2909 
2910         attrs = kvm_arch_post_run(cpu, run);
2911 
2912 #ifdef KVM_HAVE_MCE_INJECTION
2913         if (unlikely(have_sigbus_pending)) {
2914             qemu_mutex_lock_iothread();
2915             kvm_arch_on_sigbus_vcpu(cpu, pending_sigbus_code,
2916                                     pending_sigbus_addr);
2917             have_sigbus_pending = false;
2918             qemu_mutex_unlock_iothread();
2919         }
2920 #endif
2921 
2922         if (run_ret < 0) {
2923             if (run_ret == -EINTR || run_ret == -EAGAIN) {
2924                 DPRINTF("io window exit\n");
2925                 kvm_eat_signals(cpu);
2926                 ret = EXCP_INTERRUPT;
2927                 break;
2928             }
2929             fprintf(stderr, "error: kvm run failed %s\n",
2930                     strerror(-run_ret));
2931 #ifdef TARGET_PPC
2932             if (run_ret == -EBUSY) {
2933                 fprintf(stderr,
2934                         "This is probably because your SMT is enabled.\n"
2935                         "VCPU can only run on primary threads with all "
2936                         "secondary threads offline.\n");
2937             }
2938 #endif
2939             ret = -1;
2940             break;
2941         }
2942 
2943         trace_kvm_run_exit(cpu->cpu_index, run->exit_reason);
2944         switch (run->exit_reason) {
2945         case KVM_EXIT_IO:
2946             DPRINTF("handle_io\n");
2947             /* Called outside BQL */
2948             kvm_handle_io(run->io.port, attrs,
2949                           (uint8_t *)run + run->io.data_offset,
2950                           run->io.direction,
2951                           run->io.size,
2952                           run->io.count);
2953             ret = 0;
2954             break;
2955         case KVM_EXIT_MMIO:
2956             DPRINTF("handle_mmio\n");
2957             /* Called outside BQL */
2958             address_space_rw(&address_space_memory,
2959                              run->mmio.phys_addr, attrs,
2960                              run->mmio.data,
2961                              run->mmio.len,
2962                              run->mmio.is_write);
2963             ret = 0;
2964             break;
2965         case KVM_EXIT_IRQ_WINDOW_OPEN:
2966             DPRINTF("irq_window_open\n");
2967             ret = EXCP_INTERRUPT;
2968             break;
2969         case KVM_EXIT_SHUTDOWN:
2970             DPRINTF("shutdown\n");
2971             qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
2972             ret = EXCP_INTERRUPT;
2973             break;
2974         case KVM_EXIT_UNKNOWN:
2975             fprintf(stderr, "KVM: unknown exit, hardware reason %" PRIx64 "\n",
2976                     (uint64_t)run->hw.hardware_exit_reason);
2977             ret = -1;
2978             break;
2979         case KVM_EXIT_INTERNAL_ERROR:
2980             ret = kvm_handle_internal_error(cpu, run);
2981             break;
2982         case KVM_EXIT_DIRTY_RING_FULL:
2983             /*
2984              * We shouldn't continue if the dirty ring of this vcpu is
2985              * still full.  Got kicked by KVM_RESET_DIRTY_RINGS.
2986              */
2987             trace_kvm_dirty_ring_full(cpu->cpu_index);
2988             qemu_mutex_lock_iothread();
2989             /*
2990              * We throttle the vCPU by making it sleep once it exits the
2991              * kernel due to a full dirty ring. In the dirtylimit scenario,
2992              * reaping all vCPUs after a single vCPU's ring gets full would
2993              * skip that sleep, so just reap the vCPU whose ring is full.
2994              */
2995             if (dirtylimit_in_service()) {
2996                 kvm_dirty_ring_reap(kvm_state, cpu);
2997             } else {
2998                 kvm_dirty_ring_reap(kvm_state, NULL);
2999             }
3000             qemu_mutex_unlock_iothread();
3001             dirtylimit_vcpu_execute(cpu);
3002             ret = 0;
3003             break;
3004         case KVM_EXIT_SYSTEM_EVENT:
3005             switch (run->system_event.type) {
3006             case KVM_SYSTEM_EVENT_SHUTDOWN:
3007                 qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN);
3008                 ret = EXCP_INTERRUPT;
3009                 break;
3010             case KVM_SYSTEM_EVENT_RESET:
3011                 qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
3012                 ret = EXCP_INTERRUPT;
3013                 break;
3014             case KVM_SYSTEM_EVENT_CRASH:
3015                 kvm_cpu_synchronize_state(cpu);
3016                 qemu_mutex_lock_iothread();
3017                 qemu_system_guest_panicked(cpu_get_crash_info(cpu));
3018                 qemu_mutex_unlock_iothread();
3019                 ret = 0;
3020                 break;
3021             default:
3022                 DPRINTF("kvm_arch_handle_exit\n");
3023                 ret = kvm_arch_handle_exit(cpu, run);
3024                 break;
3025             }
3026             break;
3027         default:
3028             DPRINTF("kvm_arch_handle_exit\n");
3029             ret = kvm_arch_handle_exit(cpu, run);
3030             break;
3031         }
3032     } while (ret == 0);
3033 
3034     cpu_exec_end(cpu);
3035     qemu_mutex_lock_iothread();
3036 
3037     if (ret < 0) {
3038         cpu_dump_state(cpu, stderr, CPU_DUMP_CODE);
3039         vm_stop(RUN_STATE_INTERNAL_ERROR);
3040     }
3041 
3042     qatomic_set(&cpu->exit_request, 0);
3043     return ret;
3044 }
3045 
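/*
 * Thin ioctl wrappers.  They differ only in the target file descriptor
 * (the /dev/kvm system fd, the VM fd, a vCPU fd, or a device fd) and all
 * normalize failure to a negative errno value, which is what the rest of
 * this file expects.
 */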
3046 int kvm_ioctl(KVMState *s, int type, ...)
3047 {
3048     int ret;
3049     void *arg;
3050     va_list ap;
3051 
3052     va_start(ap, type);
3053     arg = va_arg(ap, void *);
3054     va_end(ap);
3055 
3056     trace_kvm_ioctl(type, arg);
3057     ret = ioctl(s->fd, type, arg);
3058     if (ret == -1) {
3059         ret = -errno;
3060     }
3061     return ret;
3062 }
3063 
3064 int kvm_vm_ioctl(KVMState *s, int type, ...)
3065 {
3066     int ret;
3067     void *arg;
3068     va_list ap;
3069 
3070     va_start(ap, type);
3071     arg = va_arg(ap, void *);
3072     va_end(ap);
3073 
3074     trace_kvm_vm_ioctl(type, arg);
3075     ret = ioctl(s->vmfd, type, arg);
3076     if (ret == -1) {
3077         ret = -errno;
3078     }
3079     return ret;
3080 }
3081 
3082 int kvm_vcpu_ioctl(CPUState *cpu, int type, ...)
3083 {
3084     int ret;
3085     void *arg;
3086     va_list ap;
3087 
3088     va_start(ap, type);
3089     arg = va_arg(ap, void *);
3090     va_end(ap);
3091 
3092     trace_kvm_vcpu_ioctl(cpu->cpu_index, type, arg);
3093     ret = ioctl(cpu->kvm_fd, type, arg);
3094     if (ret == -1) {
3095         ret = -errno;
3096     }
3097     return ret;
3098 }
3099 
3100 int kvm_device_ioctl(int fd, int type, ...)
3101 {
3102     int ret;
3103     void *arg;
3104     va_list ap;
3105 
3106     va_start(ap, type);
3107     arg = va_arg(ap, void *);
3108     va_end(ap);
3109 
3110     trace_kvm_device_ioctl(fd, type, arg);
3111     ret = ioctl(fd, type, arg);
3112     if (ret == -1) {
3113         ret = -errno;
3114     }
3115     return ret;
3116 }
3117 
3118 int kvm_vm_check_attr(KVMState *s, uint32_t group, uint64_t attr)
3119 {
3120     int ret;
3121     struct kvm_device_attr attribute = {
3122         .group = group,
3123         .attr = attr,
3124     };
3125 
3126     if (!kvm_vm_attributes_allowed) {
3127         return 0;
3128     }
3129 
3130     ret = kvm_vm_ioctl(s, KVM_HAS_DEVICE_ATTR, &attribute);
3131     /* kvm returns 0 on success for HAS_DEVICE_ATTR */
3132     return ret ? 0 : 1;
3133 }
3134 
3135 int kvm_device_check_attr(int dev_fd, uint32_t group, uint64_t attr)
3136 {
3137     struct kvm_device_attr attribute = {
3138         .group = group,
3139         .attr = attr,
3140         .flags = 0,
3141     };
3142 
3143     return kvm_device_ioctl(dev_fd, KVM_HAS_DEVICE_ATTR, &attribute) ? 0 : 1;
3144 }
3145 
3146 int kvm_device_access(int fd, int group, uint64_t attr,
3147                       void *val, bool write, Error **errp)
3148 {
3149     struct kvm_device_attr kvmattr;
3150     int err;
3151 
3152     kvmattr.flags = 0;
3153     kvmattr.group = group;
3154     kvmattr.attr = attr;
3155     kvmattr.addr = (uintptr_t)val;
3156 
3157     err = kvm_device_ioctl(fd,
3158                            write ? KVM_SET_DEVICE_ATTR : KVM_GET_DEVICE_ATTR,
3159                            &kvmattr);
3160     if (err < 0) {
3161         error_setg_errno(errp, -err,
3162                          "KVM_%s_DEVICE_ATTR failed: Group %d "
3163                          "attr 0x%016" PRIx64,
3164                          write ? "SET" : "GET", group, attr);
3165     }
3166     return err;
3167 }
3168 
3169 bool kvm_has_sync_mmu(void)
3170 {
3171     return kvm_state->sync_mmu;
3172 }
3173 
3174 int kvm_has_vcpu_events(void)
3175 {
3176     return kvm_state->vcpu_events;
3177 }
3178 
3179 int kvm_has_robust_singlestep(void)
3180 {
3181     return kvm_state->robust_singlestep;
3182 }
3183 
3184 int kvm_has_debugregs(void)
3185 {
3186     return kvm_state->debugregs;
3187 }
3188 
3189 int kvm_max_nested_state_length(void)
3190 {
3191     return kvm_state->max_nested_state_len;
3192 }
3193 
3194 int kvm_has_many_ioeventfds(void)
3195 {
3196     if (!kvm_enabled()) {
3197         return 0;
3198     }
3199     return kvm_state->many_ioeventfds;
3200 }
3201 
3202 int kvm_has_gsi_routing(void)
3203 {
3204 #ifdef KVM_CAP_IRQ_ROUTING
3205     return kvm_check_extension(kvm_state, KVM_CAP_IRQ_ROUTING);
3206 #else
3207     return false;
3208 #endif
3209 }
3210 
3211 int kvm_has_intx_set_mask(void)
3212 {
3213     return kvm_state->intx_set_mask;
3214 }
3215 
3216 bool kvm_arm_supports_user_irq(void)
3217 {
3218     return kvm_check_extension(kvm_state, KVM_CAP_ARM_USER_IRQ);
3219 }
3220 
3221 #ifdef KVM_CAP_SET_GUEST_DEBUG
3222 struct kvm_sw_breakpoint *kvm_find_sw_breakpoint(CPUState *cpu,
3223                                                  target_ulong pc)
3224 {
3225     struct kvm_sw_breakpoint *bp;
3226 
3227     QTAILQ_FOREACH(bp, &cpu->kvm_state->kvm_sw_breakpoints, entry) {
3228         if (bp->pc == pc) {
3229             return bp;
3230         }
3231     }
3232     return NULL;
3233 }
3234 
3235 int kvm_sw_breakpoints_active(CPUState *cpu)
3236 {
3237     return !QTAILQ_EMPTY(&cpu->kvm_state->kvm_sw_breakpoints);
3238 }
3239 
3240 struct kvm_set_guest_debug_data {
3241     struct kvm_guest_debug dbg;
3242     int err;
3243 };
3244 
3245 static void kvm_invoke_set_guest_debug(CPUState *cpu, run_on_cpu_data data)
3246 {
3247     struct kvm_set_guest_debug_data *dbg_data =
3248         (struct kvm_set_guest_debug_data *) data.host_ptr;
3249 
3250     dbg_data->err = kvm_vcpu_ioctl(cpu, KVM_SET_GUEST_DEBUG,
3251                                    &dbg_data->dbg);
3252 }
3253 
3254 int kvm_update_guest_debug(CPUState *cpu, unsigned long reinject_trap)
3255 {
3256     struct kvm_set_guest_debug_data data;
3257 
3258     data.dbg.control = reinject_trap;
3259 
3260     if (cpu->singlestep_enabled) {
3261         data.dbg.control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP;
3262 
3263         if (cpu->singlestep_enabled & SSTEP_NOIRQ) {
3264             data.dbg.control |= KVM_GUESTDBG_BLOCKIRQ;
3265         }
3266     }
3267     kvm_arch_update_guest_debug(cpu, &data.dbg);
3268 
3269     run_on_cpu(cpu, kvm_invoke_set_guest_debug,
3270                RUN_ON_CPU_HOST_PTR(&data));
3271     return data.err;
3272 }
3273 
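/*
 * Software breakpoints are reference-counted per guest address: the arch
 * hook patches in the breakpoint instruction and the bp stays on
 * kvm_sw_breakpoints until its last user removes it.  Hardware breakpoints
 * are handled entirely by the arch hooks.  Either way, every vCPU's debug
 * state is refreshed afterwards via kvm_update_guest_debug().
 */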
3274 int kvm_insert_breakpoint(CPUState *cpu, target_ulong addr,
3275                           target_ulong len, int type)
3276 {
3277     struct kvm_sw_breakpoint *bp;
3278     int err;
3279 
3280     if (type == GDB_BREAKPOINT_SW) {
3281         bp = kvm_find_sw_breakpoint(cpu, addr);
3282         if (bp) {
3283             bp->use_count++;
3284             return 0;
3285         }
3286 
3287         bp = g_new(struct kvm_sw_breakpoint, 1);
3288         bp->pc = addr;
3289         bp->use_count = 1;
3290         err = kvm_arch_insert_sw_breakpoint(cpu, bp);
3291         if (err) {
3292             g_free(bp);
3293             return err;
3294         }
3295 
3296         QTAILQ_INSERT_HEAD(&cpu->kvm_state->kvm_sw_breakpoints, bp, entry);
3297     } else {
3298         err = kvm_arch_insert_hw_breakpoint(addr, len, type);
3299         if (err) {
3300             return err;
3301         }
3302     }
3303 
3304     CPU_FOREACH(cpu) {
3305         err = kvm_update_guest_debug(cpu, 0);
3306         if (err) {
3307             return err;
3308         }
3309     }
3310     return 0;
3311 }
3312 
3313 int kvm_remove_breakpoint(CPUState *cpu, target_ulong addr,
3314                           target_ulong len, int type)
3315 {
3316     struct kvm_sw_breakpoint *bp;
3317     int err;
3318 
3319     if (type == GDB_BREAKPOINT_SW) {
3320         bp = kvm_find_sw_breakpoint(cpu, addr);
3321         if (!bp) {
3322             return -ENOENT;
3323         }
3324 
3325         if (bp->use_count > 1) {
3326             bp->use_count--;
3327             return 0;
3328         }
3329 
3330         err = kvm_arch_remove_sw_breakpoint(cpu, bp);
3331         if (err) {
3332             return err;
3333         }
3334 
3335         QTAILQ_REMOVE(&cpu->kvm_state->kvm_sw_breakpoints, bp, entry);
3336         g_free(bp);
3337     } else {
3338         err = kvm_arch_remove_hw_breakpoint(addr, len, type);
3339         if (err) {
3340             return err;
3341         }
3342     }
3343 
3344     CPU_FOREACH(cpu) {
3345         err = kvm_update_guest_debug(cpu, 0);
3346         if (err) {
3347             return err;
3348         }
3349     }
3350     return 0;
3351 }
3352 
3353 void kvm_remove_all_breakpoints(CPUState *cpu)
3354 {
3355     struct kvm_sw_breakpoint *bp, *next;
3356     KVMState *s = cpu->kvm_state;
3357     CPUState *tmpcpu;
3358 
3359     QTAILQ_FOREACH_SAFE(bp, &s->kvm_sw_breakpoints, entry, next) {
3360         if (kvm_arch_remove_sw_breakpoint(cpu, bp) != 0) {
3361             /* Try harder to find a CPU that currently sees the breakpoint. */
3362             CPU_FOREACH(tmpcpu) {
3363                 if (kvm_arch_remove_sw_breakpoint(tmpcpu, bp) == 0) {
3364                     break;
3365                 }
3366             }
3367         }
3368         QTAILQ_REMOVE(&s->kvm_sw_breakpoints, bp, entry);
3369         g_free(bp);
3370     }
3371     kvm_arch_remove_all_hw_breakpoints();
3372 
3373     CPU_FOREACH(cpu) {
3374         kvm_update_guest_debug(cpu, 0);
3375     }
3376 }
3377 
3378 #else /* !KVM_CAP_SET_GUEST_DEBUG */
3379 
3380 int kvm_update_guest_debug(CPUState *cpu, unsigned long reinject_trap)
3381 {
3382     return -EINVAL;
3383 }
3384 
3385 int kvm_insert_breakpoint(CPUState *cpu, target_ulong addr,
3386                           target_ulong len, int type)
3387 {
3388     return -EINVAL;
3389 }
3390 
3391 int kvm_remove_breakpoint(CPUState *cpu, target_ulong addr,
3392                           target_ulong len, int type)
3393 {
3394     return -EINVAL;
3395 }
3396 
3397 void kvm_remove_all_breakpoints(CPUState *cpu)
3398 {
3399 }
3400 #endif /* !KVM_CAP_SET_GUEST_DEBUG */
3401 
3402 static int kvm_set_signal_mask(CPUState *cpu, const sigset_t *sigset)
3403 {
3404     KVMState *s = kvm_state;
3405     struct kvm_signal_mask *sigmask;
3406     int r;
3407 
3408     sigmask = g_malloc(sizeof(*sigmask) + sizeof(*sigset));
3409 
3410     sigmask->len = s->sigmask_len;
3411     memcpy(sigmask->sigset, sigset, sizeof(*sigset));
3412     r = kvm_vcpu_ioctl(cpu, KVM_SET_SIGNAL_MASK, sigmask);
3413     g_free(sigmask);
3414 
3415     return r;
3416 }
3417 
3418 static void kvm_ipi_signal(int sig)
3419 {
3420     if (current_cpu) {
3421         assert(kvm_immediate_exit);
3422         kvm_cpu_kick(current_cpu);
3423     }
3424 }
3425 
3426 void kvm_init_cpu_signals(CPUState *cpu)
3427 {
3428     int r;
3429     sigset_t set;
3430     struct sigaction sigact;
3431 
3432     memset(&sigact, 0, sizeof(sigact));
3433     sigact.sa_handler = kvm_ipi_signal;
3434     sigaction(SIG_IPI, &sigact, NULL);
3435 
3436     pthread_sigmask(SIG_BLOCK, NULL, &set);
3437 #if defined KVM_HAVE_MCE_INJECTION
3438     sigdelset(&set, SIGBUS);
3439     pthread_sigmask(SIG_SETMASK, &set, NULL);
3440 #endif
3441     sigdelset(&set, SIG_IPI);
3442     if (kvm_immediate_exit) {
3443         r = pthread_sigmask(SIG_SETMASK, &set, NULL);
3444     } else {
3445         r = kvm_set_signal_mask(cpu, &set);
3446     }
3447     if (r) {
3448         fprintf(stderr, "kvm_set_signal_mask: %s\n", strerror(-r));
3449         exit(1);
3450     }
3451 }
3452 
3453 /* Called asynchronously in VCPU thread.  */
3454 int kvm_on_sigbus_vcpu(CPUState *cpu, int code, void *addr)
3455 {
3456 #ifdef KVM_HAVE_MCE_INJECTION
3457     if (have_sigbus_pending) {
3458         return 1;
3459     }
3460     have_sigbus_pending = true;
3461     pending_sigbus_addr = addr;
3462     pending_sigbus_code = code;
3463     qatomic_set(&cpu->exit_request, 1);
3464     return 0;
3465 #else
3466     return 1;
3467 #endif
3468 }
3469 
3470 /* Called synchronously (via signalfd) in main thread.  */
3471 int kvm_on_sigbus(int code, void *addr)
3472 {
3473 #ifdef KVM_HAVE_MCE_INJECTION
3474     /* An action-required MCE kills the process if SIGBUS is blocked.  Since
3475      * SIGBUS is blocked in the I/O thread, where we handle MCEs via signalfd,
3476      * we can only see action-optional MCEs here.
3477      */
3478     assert(code != BUS_MCEERR_AR);
3479     kvm_arch_on_sigbus_vcpu(first_cpu, code, addr);
3480     return 0;
3481 #else
3482     return 1;
3483 #endif
3484 }
3485 
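     /*
      * Create an in-kernel device with KVM_CREATE_DEVICE.  With 'test' set,
      * only KVM_CREATE_DEVICE_TEST is issued and no device is created; the
      * function then returns 0 if the type is supported.  Otherwise it
      * returns the new device fd, or a negative errno on failure.
      *
      * A caller might probe before creating, along these lines (sketch;
      * KVM_DEV_TYPE_VFIO is just one device type from the KVM uAPI):
      *
      *     if (!kvm_create_device(s, KVM_DEV_TYPE_VFIO, true)) {
      *         fd = kvm_create_device(s, KVM_DEV_TYPE_VFIO, false);
      *     }
      */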
3486 int kvm_create_device(KVMState *s, uint64_t type, bool test)
3487 {
3488     int ret;
3489     struct kvm_create_device create_dev;
3490 
3491     create_dev.type = type;
3492     create_dev.fd = -1;
3493     create_dev.flags = test ? KVM_CREATE_DEVICE_TEST : 0;
3494 
3495     if (!kvm_check_extension(s, KVM_CAP_DEVICE_CTRL)) {
3496         return -ENOTSUP;
3497     }
3498 
3499     ret = kvm_vm_ioctl(s, KVM_CREATE_DEVICE, &create_dev);
3500     if (ret) {
3501         return ret;
3502     }
3503 
3504     return test ? 0 : create_dev.fd;
3505 }
3506 
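     /*
      * Probe-only variant of kvm_create_device() that works on a raw VM fd:
      * it checks KVM_CAP_DEVICE_CTRL and then issues KVM_CREATE_DEVICE with
      * the KVM_CREATE_DEVICE_TEST flag, so no device is actually created.
      */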
3507 bool kvm_device_supported(int vmfd, uint64_t type)
3508 {
3509     struct kvm_create_device create_dev = {
3510         .type = type,
3511         .fd = -1,
3512         .flags = KVM_CREATE_DEVICE_TEST,
3513     };
3514 
3515     if (ioctl(vmfd, KVM_CHECK_EXTENSION, KVM_CAP_DEVICE_CTRL) <= 0) {
3516         return false;
3517     }
3518 
3519     return (ioctl(vmfd, KVM_CREATE_DEVICE, &create_dev) >= 0);
3520 }
3521 
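     /*
      * Thin wrappers around the KVM_SET_ONE_REG / KVM_GET_ONE_REG vCPU
      * ioctls: 'id' selects an architecture-specific register and the
      * pointer must reference a buffer of the size encoded in that id.
      * A typical use looks roughly like this (REG_ID is a placeholder for
      * a real KVM_REG_* constant):
      *
      *     uint64_t val;
      *     if (!kvm_get_one_reg(cs, REG_ID, &val)) {
      *         val |= 1;
      *         kvm_set_one_reg(cs, REG_ID, &val);
      *     }
      *
      * Failures are traced and the negative errno is returned.
      */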
3522 int kvm_set_one_reg(CPUState *cs, uint64_t id, void *source)
3523 {
3524     struct kvm_one_reg reg;
3525     int r;
3526 
3527     reg.id = id;
3528     reg.addr = (uintptr_t) source;
3529     r = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
3530     if (r) {
3531         trace_kvm_failed_reg_set(id, strerror(-r));
3532     }
3533     return r;
3534 }
3535 
3536 int kvm_get_one_reg(CPUState *cs, uint64_t id, void *target)
3537 {
3538     struct kvm_one_reg reg;
3539     int r;
3540 
3541     reg.id = id;
3542     reg.addr = (uintptr_t) target;
3543     r = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
3544     if (r) {
3545         trace_kvm_failed_reg_get(id, strerror(-r));
3546     }
3547     return r;
3548 }
3549 
3550 static bool kvm_accel_has_memory(MachineState *ms, AddressSpace *as,
3551                                  hwaddr start_addr, hwaddr size)
3552 {
3553     KVMState *kvm = KVM_STATE(ms->accelerator);
3554     int i;
3555 
3556     for (i = 0; i < kvm->nr_as; ++i) {
3557         if (kvm->as[i].as == as && kvm->as[i].ml) {
3558             size = MIN(kvm_max_slot_size, size);
3559             return NULL != kvm_lookup_matching_slot(kvm->as[i].ml,
3560                                                     start_addr, size);
3561         }
3562     }
3563 
3564     return false;
3565 }
3566 
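     /*
      * QOM property accessors for the KVM accelerator.  The setters refuse
      * to run once the KVM fd is open (s->fd != -1), i.e. after accelerator
      * initialization.  On the command line these properties appear as, for
      * example (illustrative values):
      *
      *     -accel kvm,kvm-shadow-mem=16777216,kernel-irqchip=split
      */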
3567 static void kvm_get_kvm_shadow_mem(Object *obj, Visitor *v,
3568                                    const char *name, void *opaque,
3569                                    Error **errp)
3570 {
3571     KVMState *s = KVM_STATE(obj);
3572     int64_t value = s->kvm_shadow_mem;
3573 
3574     visit_type_int(v, name, &value, errp);
3575 }
3576 
3577 static void kvm_set_kvm_shadow_mem(Object *obj, Visitor *v,
3578                                    const char *name, void *opaque,
3579                                    Error **errp)
3580 {
3581     KVMState *s = KVM_STATE(obj);
3582     int64_t value;
3583 
3584     if (s->fd != -1) {
3585         error_setg(errp, "Cannot set properties after the accelerator has been initialized");
3586         return;
3587     }
3588 
3589     if (!visit_type_int(v, name, &value, errp)) {
3590         return;
3591     }
3592 
3593     s->kvm_shadow_mem = value;
3594 }
3595 
3596 static void kvm_set_kernel_irqchip(Object *obj, Visitor *v,
3597                                    const char *name, void *opaque,
3598                                    Error **errp)
3599 {
3600     KVMState *s = KVM_STATE(obj);
3601     OnOffSplit mode;
3602 
3603     if (s->fd != -1) {
3604         error_setg(errp, "Cannot set properties after the accelerator has been initialized");
3605         return;
3606     }
3607 
3608     if (!visit_type_OnOffSplit(v, name, &mode, errp)) {
3609         return;
3610     }
3611     switch (mode) {
3612     case ON_OFF_SPLIT_ON:
3613         s->kernel_irqchip_allowed = true;
3614         s->kernel_irqchip_required = true;
3615         s->kernel_irqchip_split = ON_OFF_AUTO_OFF;
3616         break;
3617     case ON_OFF_SPLIT_OFF:
3618         s->kernel_irqchip_allowed = false;
3619         s->kernel_irqchip_required = false;
3620         s->kernel_irqchip_split = ON_OFF_AUTO_OFF;
3621         break;
3622     case ON_OFF_SPLIT_SPLIT:
3623         s->kernel_irqchip_allowed = true;
3624         s->kernel_irqchip_required = true;
3625         s->kernel_irqchip_split = ON_OFF_AUTO_ON;
3626         break;
3627     default:
3628         /* The value was checked in visit_type_OnOffSplit() above. If
3629          * we get here, then something is wrong in QEMU.
3630          */
3631         abort();
3632     }
3633 }
3634 
3635 bool kvm_kernel_irqchip_allowed(void)
3636 {
3637     return kvm_state->kernel_irqchip_allowed;
3638 }
3639 
3640 bool kvm_kernel_irqchip_required(void)
3641 {
3642     return kvm_state->kernel_irqchip_required;
3643 }
3644 
3645 bool kvm_kernel_irqchip_split(void)
3646 {
3647     return kvm_state->kernel_irqchip_split == ON_OFF_AUTO_ON;
3648 }
3649 
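     /*
      * dirty-ring-size property: number of entries in each vCPU's dirty
      * ring; it must be a power of two, and 0 (the default) keeps dirty
      * tracking on the bitmap path.  Illustrative command line:
      *
      *     -accel kvm,dirty-ring-size=4096
      */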
3650 static void kvm_get_dirty_ring_size(Object *obj, Visitor *v,
3651                                     const char *name, void *opaque,
3652                                     Error **errp)
3653 {
3654     KVMState *s = KVM_STATE(obj);
3655     uint32_t value = s->kvm_dirty_ring_size;
3656 
3657     visit_type_uint32(v, name, &value, errp);
3658 }
3659 
3660 static void kvm_set_dirty_ring_size(Object *obj, Visitor *v,
3661                                     const char *name, void *opaque,
3662                                     Error **errp)
3663 {
3664     KVMState *s = KVM_STATE(obj);
3665     Error *error = NULL;
3666     uint32_t value;
3667 
3668     if (s->fd != -1) {
3669         error_setg(errp, "Cannot set properties after the accelerator has been initialized");
3670         return;
3671     }
3672 
3673     visit_type_uint32(v, name, &value, &error);
3674     if (error) {
3675         error_propagate(errp, error);
3676         return;
3677     }
3678     if (value & (value - 1)) {
3679         error_setg(errp, "dirty-ring-size must be a power of two");
3680         return;
3681     }
3682 
3683     s->kvm_dirty_ring_size = value;
3684 }
3685 
3686 static void kvm_accel_instance_init(Object *obj)
3687 {
3688     KVMState *s = KVM_STATE(obj);
3689 
3690     s->fd = -1;
3691     s->vmfd = -1;
3692     s->kvm_shadow_mem = -1;
3693     s->kernel_irqchip_allowed = true;
3694     s->kernel_irqchip_split = ON_OFF_AUTO_AUTO;
3695     /* KVM dirty ring is by default off */
3696     s->kvm_dirty_ring_size = 0;
3697 }
3698 
3699 static void kvm_accel_class_init(ObjectClass *oc, void *data)
3700 {
3701     AccelClass *ac = ACCEL_CLASS(oc);
3702     ac->name = "KVM";
3703     ac->init_machine = kvm_init;
3704     ac->has_memory = kvm_accel_has_memory;
3705     ac->allowed = &kvm_allowed;
3706 
3707     object_class_property_add(oc, "kernel-irqchip", "on|off|split",
3708         NULL, kvm_set_kernel_irqchip,
3709         NULL, NULL);
3710     object_class_property_set_description(oc, "kernel-irqchip",
3711         "Configure KVM in-kernel irqchip");
3712 
3713     object_class_property_add(oc, "kvm-shadow-mem", "int",
3714         kvm_get_kvm_shadow_mem, kvm_set_kvm_shadow_mem,
3715         NULL, NULL);
3716     object_class_property_set_description(oc, "kvm-shadow-mem",
3717         "KVM shadow MMU size");
3718 
3719     object_class_property_add(oc, "dirty-ring-size", "uint32",
3720         kvm_get_dirty_ring_size, kvm_set_dirty_ring_size,
3721         NULL, NULL);
3722     object_class_property_set_description(oc, "dirty-ring-size",
3723         "Size of KVM dirty page ring buffer (default: 0, i.e. use bitmap)");
3724 }
3725 
3726 static const TypeInfo kvm_accel_type = {
3727     .name = TYPE_KVM_ACCEL,
3728     .parent = TYPE_ACCEL,
3729     .instance_init = kvm_accel_instance_init,
3730     .class_init = kvm_accel_class_init,
3731     .instance_size = sizeof(KVMState),
3732 };
3733 
3734 static void kvm_type_init(void)
3735 {
3736     type_register_static(&kvm_accel_type);
3737 }
3738 
3739 type_init(kvm_type_init);
3740 
3741 typedef struct StatsArgs {
3742     union StatsResultsType {
3743         StatsResultList **stats;
3744         StatsSchemaList **schema;
3745     } result;
3746     strList *names;
3747     Error **errp;
3748 } StatsArgs;
3749 
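     /*
      * Convert one binary stats descriptor plus its data words into a QAPI
      * Stats entry and prepend it to 'stats_list'.  Descriptors whose type,
      * unit or base QEMU does not recognize are skipped, in which case the
      * list is returned unchanged.
      */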
3750 static StatsList *add_kvmstat_entry(struct kvm_stats_desc *pdesc,
3751                                     uint64_t *stats_data,
3752                                     StatsList *stats_list,
3753                                     Error **errp)
3754 {
3755 
3756     Stats *stats;
3757     uint64List *val_list = NULL;
3758 
3759     /* Only add stats that we understand.  */
3760     switch (pdesc->flags & KVM_STATS_TYPE_MASK) {
3761     case KVM_STATS_TYPE_CUMULATIVE:
3762     case KVM_STATS_TYPE_INSTANT:
3763     case KVM_STATS_TYPE_PEAK:
3764     case KVM_STATS_TYPE_LINEAR_HIST:
3765     case KVM_STATS_TYPE_LOG_HIST:
3766         break;
3767     default:
3768         return stats_list;
3769     }
3770 
3771     switch (pdesc->flags & KVM_STATS_UNIT_MASK) {
3772     case KVM_STATS_UNIT_NONE:
3773     case KVM_STATS_UNIT_BYTES:
3774     case KVM_STATS_UNIT_CYCLES:
3775     case KVM_STATS_UNIT_SECONDS:
3776     case KVM_STATS_UNIT_BOOLEAN:
3777         break;
3778     default:
3779         return stats_list;
3780     }
3781 
3782     switch (pdesc->flags & KVM_STATS_BASE_MASK) {
3783     case KVM_STATS_BASE_POW10:
3784     case KVM_STATS_BASE_POW2:
3785         break;
3786     default:
3787         return stats_list;
3788     }
3789 
3790     /* Alloc and populate data list */
3791     stats = g_new0(Stats, 1);
3792     stats->name = g_strdup(pdesc->name);
3793     stats->value = g_new0(StatsValue, 1);
3794 
3795     if ((pdesc->flags & KVM_STATS_UNIT_MASK) == KVM_STATS_UNIT_BOOLEAN) {
3796         stats->value->u.boolean = *stats_data;
3797         stats->value->type = QTYPE_QBOOL;
3798     } else if (pdesc->size == 1) {
3799         stats->value->u.scalar = *stats_data;
3800         stats->value->type = QTYPE_QNUM;
3801     } else {
3802         int i;
3803         for (i = 0; i < pdesc->size; i++) {
3804             QAPI_LIST_PREPEND(val_list, stats_data[i]);
3805         }
3806         stats->value->u.list = val_list;
3807         stats->value->type = QTYPE_QLIST;
3808     }
3809 
3810     QAPI_LIST_PREPEND(stats_list, stats);
3811     return stats_list;
3812 }
3813 
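     /*
      * Schema counterpart of add_kvmstat_entry(): translate a descriptor's
      * flags into a StatsSchemaValue (type, unit, base, exponent, bucket
      * size) and prepend it to 'list'; unrecognized flag combinations are
      * dropped via the exit path.
      */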
3814 static StatsSchemaValueList *add_kvmschema_entry(struct kvm_stats_desc *pdesc,
3815                                                  StatsSchemaValueList *list,
3816                                                  Error **errp)
3817 {
3818     StatsSchemaValueList *schema_entry = g_new0(StatsSchemaValueList, 1);
3819     schema_entry->value = g_new0(StatsSchemaValue, 1);
3820 
3821     switch (pdesc->flags & KVM_STATS_TYPE_MASK) {
3822     case KVM_STATS_TYPE_CUMULATIVE:
3823         schema_entry->value->type = STATS_TYPE_CUMULATIVE;
3824         break;
3825     case KVM_STATS_TYPE_INSTANT:
3826         schema_entry->value->type = STATS_TYPE_INSTANT;
3827         break;
3828     case KVM_STATS_TYPE_PEAK:
3829         schema_entry->value->type = STATS_TYPE_PEAK;
3830         break;
3831     case KVM_STATS_TYPE_LINEAR_HIST:
3832         schema_entry->value->type = STATS_TYPE_LINEAR_HISTOGRAM;
3833         schema_entry->value->bucket_size = pdesc->bucket_size;
3834         schema_entry->value->has_bucket_size = true;
3835         break;
3836     case KVM_STATS_TYPE_LOG_HIST:
3837         schema_entry->value->type = STATS_TYPE_LOG2_HISTOGRAM;
3838         break;
3839     default:
3840         goto exit;
3841     }
3842 
3843     switch (pdesc->flags & KVM_STATS_UNIT_MASK) {
3844     case KVM_STATS_UNIT_NONE:
3845         break;
3846     case KVM_STATS_UNIT_BOOLEAN:
3847         schema_entry->value->has_unit = true;
3848         schema_entry->value->unit = STATS_UNIT_BOOLEAN;
3849         break;
3850     case KVM_STATS_UNIT_BYTES:
3851         schema_entry->value->has_unit = true;
3852         schema_entry->value->unit = STATS_UNIT_BYTES;
3853         break;
3854     case KVM_STATS_UNIT_CYCLES:
3855         schema_entry->value->has_unit = true;
3856         schema_entry->value->unit = STATS_UNIT_CYCLES;
3857         break;
3858     case KVM_STATS_UNIT_SECONDS:
3859         schema_entry->value->has_unit = true;
3860         schema_entry->value->unit = STATS_UNIT_SECONDS;
3861         break;
3862     default:
3863         goto exit;
3864     }
3865 
3866     schema_entry->value->exponent = pdesc->exponent;
3867     if (pdesc->exponent) {
3868         switch (pdesc->flags & KVM_STATS_BASE_MASK) {
3869         case KVM_STATS_BASE_POW10:
3870             schema_entry->value->has_base = true;
3871             schema_entry->value->base = 10;
3872             break;
3873         case KVM_STATS_BASE_POW2:
3874             schema_entry->value->has_base = true;
3875             schema_entry->value->base = 2;
3876             break;
3877         default:
3878             goto exit;
3879         }
3880     }
3881 
3882     schema_entry->value->name = g_strdup(pdesc->name);
3883     schema_entry->next = list;
3884     return schema_entry;
3885 exit:
3886     g_free(schema_entry->value);
3887     g_free(schema_entry);
3888     return list;
3889 }
3890 
3891 /* Cached stats descriptors */
3892 typedef struct StatsDescriptors {
3893     const char *ident; /* cache key, currently the StatsTarget */
3894     struct kvm_stats_desc *kvm_stats_desc;
3895     struct kvm_stats_header *kvm_stats_header;
3896     QTAILQ_ENTRY(StatsDescriptors) next;
3897 } StatsDescriptors;
3898 
3899 static QTAILQ_HEAD(, StatsDescriptors) stats_descriptors =
3900     QTAILQ_HEAD_INITIALIZER(stats_descriptors);
3901 
3902 /*
3903  * Return the descriptors for 'target', either from the cache (if they have
3904  * already been read) or freshly retrieved from 'stats_fd'.
3905  */
3906 static StatsDescriptors *find_stats_descriptors(StatsTarget target, int stats_fd,
3907                                                 Error **errp)
3908 {
3909     StatsDescriptors *descriptors;
3910     const char *ident;
3911     struct kvm_stats_desc *kvm_stats_desc;
3912     struct kvm_stats_header *kvm_stats_header;
3913     size_t size_desc;
3914     ssize_t ret;
3915 
3916     ident = StatsTarget_str(target);
3917     QTAILQ_FOREACH(descriptors, &stats_descriptors, next) {
3918         if (g_str_equal(descriptors->ident, ident)) {
3919             return descriptors;
3920         }
3921     }
3922 
3923     descriptors = g_new0(StatsDescriptors, 1);
3924 
3925     /* Read stats header */
3926     kvm_stats_header = g_malloc(sizeof(*kvm_stats_header));
3927     ret = read(stats_fd, kvm_stats_header, sizeof(*kvm_stats_header));
3928     if (ret != sizeof(*kvm_stats_header)) {
3929         error_setg(errp, "KVM stats: failed to read stats header: "
3930                    "expected %zu actual %zd",
3931                    sizeof(*kvm_stats_header), ret);
             g_free(kvm_stats_header);
3932         g_free(descriptors);
3933         return NULL;
3934     }
3935     size_desc = sizeof(*kvm_stats_desc) + kvm_stats_header->name_size;
3936 
3937     /* Read stats descriptors */
3938     kvm_stats_desc = g_malloc0_n(kvm_stats_header->num_desc, size_desc);
3939     ret = pread(stats_fd, kvm_stats_desc,
3940                 size_desc * kvm_stats_header->num_desc,
3941                 kvm_stats_header->desc_offset);
3942 
3943     if (ret != size_desc * kvm_stats_header->num_desc) {
3944         error_setg(errp, "KVM stats: failed to read stats descriptors: "
3945                    "expected %zu actual %zd",
3946                    size_desc * kvm_stats_header->num_desc, ret);
             g_free(kvm_stats_header);
3947         g_free(descriptors);
3948         g_free(kvm_stats_desc);
3949         return NULL;
3950     }
3951     descriptors->kvm_stats_header = kvm_stats_header;
3952     descriptors->kvm_stats_desc = kvm_stats_desc;
3953     descriptors->ident = ident;
3954     QTAILQ_INSERT_TAIL(&stats_descriptors, descriptors, next);
3955     return descriptors;
3956 }
3957 
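     /*
      * Read all statistics for one target from 'stats_fd', filter them by
      * 'names' and append the result to '*result'.  The fd follows the KVM
      * binary stats layout, roughly:
      *
      *     struct kvm_stats_header
      *     id string                (at id_offset)
      *     num_desc descriptors     (at desc_offset, each size_desc bytes)
      *     u64 data values          (at data_offset, sliced per descriptor)
      *
      * The descriptors are cached by find_stats_descriptors() above.
      */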
3958 static void query_stats(StatsResultList **result, StatsTarget target,
3959                         strList *names, int stats_fd, Error **errp)
3960 {
3961     struct kvm_stats_desc *kvm_stats_desc;
3962     struct kvm_stats_header *kvm_stats_header;
3963     StatsDescriptors *descriptors;
3964     g_autofree uint64_t *stats_data = NULL;
3965     struct kvm_stats_desc *pdesc;
3966     StatsList *stats_list = NULL;
3967     size_t size_desc, size_data = 0;
3968     ssize_t ret;
3969     int i;
3970 
3971     descriptors = find_stats_descriptors(target, stats_fd, errp);
3972     if (!descriptors) {
3973         return;
3974     }
3975 
3976     kvm_stats_header = descriptors->kvm_stats_header;
3977     kvm_stats_desc = descriptors->kvm_stats_desc;
3978     size_desc = sizeof(*kvm_stats_desc) + kvm_stats_header->name_size;
3979 
3980     /* Tally the total data size */
3981     for (i = 0; i < kvm_stats_header->num_desc; ++i) {
3982         pdesc = (void *)kvm_stats_desc + i * size_desc;
3983         size_data += pdesc->size * sizeof(*stats_data);
3984     }
3985 
3986     stats_data = g_malloc0(size_data);
3987     ret = pread(stats_fd, stats_data, size_data, kvm_stats_header->data_offset);
3988 
3989     if (ret != size_data) {
3990         error_setg(errp, "KVM stats: failed to read data: "
3991                    "expected %zu actual %zd", size_data, ret);
3992         return;
3993     }
3994 
3995     for (i = 0; i < kvm_stats_header->num_desc; ++i) {
3996         uint64_t *stats;
3997         pdesc = (void *)kvm_stats_desc + i * size_desc;
3998 
3999         /* Add entry to the list */
4000         stats = (void *)stats_data + pdesc->offset;
4001         if (!apply_str_list_filter(pdesc->name, names)) {
4002             continue;
4003         }
4004         stats_list = add_kvmstat_entry(pdesc, stats, stats_list, errp);
4005     }
4006 
4007     if (!stats_list) {
4008         return;
4009     }
4010 
4011     switch (target) {
4012     case STATS_TARGET_VM:
4013         add_stats_entry(result, STATS_PROVIDER_KVM, NULL, stats_list);
4014         break;
4015     case STATS_TARGET_VCPU:
4016         add_stats_entry(result, STATS_PROVIDER_KVM,
4017                         current_cpu->parent_obj.canonical_path,
4018                         stats_list);
4019         break;
4020     default:
4021         g_assert_not_reached();
4022     }
4023 }
4024 
4025 static void query_stats_schema(StatsSchemaList **result, StatsTarget target,
4026                                int stats_fd, Error **errp)
4027 {
4028     struct kvm_stats_desc *kvm_stats_desc;
4029     struct kvm_stats_header *kvm_stats_header;
4030     StatsDescriptors *descriptors;
4031     struct kvm_stats_desc *pdesc;
4032     StatsSchemaValueList *stats_list = NULL;
4033     size_t size_desc;
4034     int i;
4035 
4036     descriptors = find_stats_descriptors(target, stats_fd, errp);
4037     if (!descriptors) {
4038         return;
4039     }
4040 
4041     kvm_stats_header = descriptors->kvm_stats_header;
4042     kvm_stats_desc = descriptors->kvm_stats_desc;
4043     size_desc = sizeof(*kvm_stats_desc) + kvm_stats_header->name_size;
4044 
4045     /* Build the schema entry list from the descriptors */
4046     for (i = 0; i < kvm_stats_header->num_desc; ++i) {
4047         pdesc = (void *)kvm_stats_desc + i * size_desc;
4048         stats_list = add_kvmschema_entry(pdesc, stats_list, errp);
4049     }
4050 
4051     add_stats_schema(result, STATS_PROVIDER_KVM, target, stats_list);
4052 }
4053 
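     /*
      * run_on_cpu() callbacks: they execute in the vCPU thread, which keeps
      * the KVM_GET_STATS_FD vCPU ioctl from racing with KVM_RUN and lets
      * query_stats() use current_cpu for the QOM path of the result.
      */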
4054 static void query_stats_vcpu(CPUState *cpu, run_on_cpu_data data)
4055 {
4056     StatsArgs *kvm_stats_args = (StatsArgs *) data.host_ptr;
4057     int stats_fd = kvm_vcpu_ioctl(cpu, KVM_GET_STATS_FD, NULL);
4058     Error *local_err = NULL;
4059 
4060     if (stats_fd < 0) {
4061         error_setg_errno(&local_err, -stats_fd, "KVM stats: ioctl failed");
4062         error_propagate(kvm_stats_args->errp, local_err);
4063         return;
4064     }
4065     query_stats(kvm_stats_args->result.stats, STATS_TARGET_VCPU,
4066                 kvm_stats_args->names, stats_fd, kvm_stats_args->errp);
4067     close(stats_fd);
4068 }
4069 
4070 static void query_stats_schema_vcpu(CPUState *cpu, run_on_cpu_data data)
4071 {
4072     StatsArgs *kvm_stats_args = (StatsArgs *) data.host_ptr;
4073     int stats_fd = kvm_vcpu_ioctl(cpu, KVM_GET_STATS_FD, NULL);
4074     Error *local_err = NULL;
4075 
4076     if (stats_fd < 0) {
4077         error_setg_errno(&local_err, -stats_fd, "KVM stats: ioctl failed");
4078         error_propagate(kvm_stats_args->errp, local_err);
4079         return;
4080     }
4081     query_stats_schema(kvm_stats_args->result.schema, STATS_TARGET_VCPU, stats_fd,
4082                        kvm_stats_args->errp);
4083     close(stats_fd);
4084 }
4085 
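     /*
      * Provider callbacks registered with the stats subsystem via
      * add_stats_callbacks(); they back the QMP 'query-stats' and
      * 'query-stats-schemas' commands for the KVM provider.  VM-level stats
      * are read directly, while per-vCPU stats are gathered by scheduling
      * the helpers above on each matching vCPU.
      */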
4086 static void query_stats_cb(StatsResultList **result, StatsTarget target,
4087                            strList *names, strList *targets, Error **errp)
4088 {
4089     KVMState *s = kvm_state;
4090     CPUState *cpu;
4091     int stats_fd;
4092 
4093     switch (target) {
4094     case STATS_TARGET_VM:
4095     {
4096         stats_fd = kvm_vm_ioctl(s, KVM_GET_STATS_FD, NULL);
4097         if (stats_fd < 0) {
4098             error_setg_errno(errp, -stats_fd, "KVM stats: ioctl failed");
4099             return;
4100         }
4101         query_stats(result, target, names, stats_fd, errp);
4102         close(stats_fd);
4103         break;
4104     }
4105     case STATS_TARGET_VCPU:
4106     {
4107         StatsArgs stats_args;
4108         stats_args.result.stats = result;
4109         stats_args.names = names;
4110         stats_args.errp = errp;
4111         CPU_FOREACH(cpu) {
4112             if (!apply_str_list_filter(cpu->parent_obj.canonical_path, targets)) {
4113                 continue;
4114             }
4115             run_on_cpu(cpu, query_stats_vcpu, RUN_ON_CPU_HOST_PTR(&stats_args));
4116         }
4117         break;
4118     }
4119     default:
4120         break;
4121     }
4122 }
4123 
4124 void query_stats_schemas_cb(StatsSchemaList **result, Error **errp)
4125 {
4126     StatsArgs stats_args;
4127     KVMState *s = kvm_state;
4128     int stats_fd;
4129 
4130     stats_fd = kvm_vm_ioctl(s, KVM_GET_STATS_FD, NULL);
4131     if (stats_fd < 0) {
4132         error_setg_errno(errp, -stats_fd, "KVM stats: ioctl failed");
4133         return;
4134     }
4135     query_stats_schema(result, STATS_TARGET_VM, stats_fd, errp);
4136     close(stats_fd);
4137 
4138     if (first_cpu) {
4139         stats_args.result.schema = result;
4140         stats_args.errp = errp;
4141         run_on_cpu(first_cpu, query_stats_schema_vcpu, RUN_ON_CPU_HOST_PTR(&stats_args));
4142     }
4143 }
4144