xref: /openbmc/qemu/accel/kvm/kvm-all.c (revision ff4aa114)
1 /*
2  * QEMU KVM support
3  *
4  * Copyright IBM, Corp. 2008
5  *           Red Hat, Inc. 2008
6  *
7  * Authors:
8  *  Anthony Liguori   <aliguori@us.ibm.com>
9  *  Glauber Costa     <gcosta@redhat.com>
10  *
11  * This work is licensed under the terms of the GNU GPL, version 2 or later.
12  * See the COPYING file in the top-level directory.
13  *
14  */
15 
16 #include "qemu/osdep.h"
17 #include <sys/ioctl.h>
18 
19 #include <linux/kvm.h>
20 
21 #include "qemu/atomic.h"
22 #include "qemu/option.h"
23 #include "qemu/config-file.h"
24 #include "qemu/error-report.h"
25 #include "qapi/error.h"
26 #include "hw/hw.h"
27 #include "hw/pci/msi.h"
28 #include "hw/pci/msix.h"
29 #include "hw/s390x/adapter.h"
30 #include "exec/gdbstub.h"
31 #include "sysemu/kvm_int.h"
32 #include "sysemu/cpus.h"
33 #include "qemu/bswap.h"
34 #include "exec/memory.h"
35 #include "exec/ram_addr.h"
36 #include "exec/address-spaces.h"
37 #include "qemu/event_notifier.h"
38 #include "trace.h"
39 #include "hw/irq.h"
40 #include "sysemu/sev.h"
41 #include "sysemu/balloon.h"
42 
43 #include "hw/boards.h"
44 
45 /* This check must be after config-host.h is included */
46 #ifdef CONFIG_EVENTFD
47 #include <sys/eventfd.h>
48 #endif
49 
50 /* KVM uses PAGE_SIZE in its definition of KVM_COALESCED_MMIO_MAX. We
51  * need to use the real host PAGE_SIZE, as that's what KVM will use.
52  */
53 #define PAGE_SIZE getpagesize()
54 
55 //#define DEBUG_KVM
56 
57 #ifdef DEBUG_KVM
58 #define DPRINTF(fmt, ...) \
59     do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
60 #else
61 #define DPRINTF(fmt, ...) \
62     do { } while (0)
63 #endif
64 
65 #define KVM_MSI_HASHTAB_SIZE    256
66 
67 struct KVMParkedVcpu {
68     unsigned long vcpu_id;
69     int kvm_fd;
70     QLIST_ENTRY(KVMParkedVcpu) node;
71 };
72 
73 struct KVMState
74 {
75     AccelState parent_obj;
76 
77     int nr_slots;
78     int fd;
79     int vmfd;
80     int coalesced_mmio;
81     int coalesced_pio;
82     struct kvm_coalesced_mmio_ring *coalesced_mmio_ring;
83     bool coalesced_flush_in_progress;
84     int vcpu_events;
85     int robust_singlestep;
86     int debugregs;
87 #ifdef KVM_CAP_SET_GUEST_DEBUG
88     QTAILQ_HEAD(, kvm_sw_breakpoint) kvm_sw_breakpoints;
89 #endif
90     int max_nested_state_len;
91     int many_ioeventfds;
92     int intx_set_mask;
93     bool sync_mmu;
94     bool manual_dirty_log_protect;
95     /* The man page (and POSIX) say ioctl numbers are signed ints, but
96      * they're not.  Linux, glibc and *BSD all treat ioctl numbers as
97      * unsigned, and treating them as signed here can break things */
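    /* (For instance, KVM_IRQ_LINE_STATUS is an _IOWR ioctl, so bit 31 of
     * its number is set and it would be negative if stored as an int.) */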
98     unsigned irq_set_ioctl;
99     unsigned int sigmask_len;
100     GHashTable *gsimap;
101 #ifdef KVM_CAP_IRQ_ROUTING
102     struct kvm_irq_routing *irq_routes;
103     int nr_allocated_irq_routes;
104     unsigned long *used_gsi_bitmap;
105     unsigned int gsi_count;
106     QTAILQ_HEAD(, KVMMSIRoute) msi_hashtab[KVM_MSI_HASHTAB_SIZE];
107 #endif
108     KVMMemoryListener memory_listener;
109     QLIST_HEAD(, KVMParkedVcpu) kvm_parked_vcpus;
110 
111     /* memory encryption */
112     void *memcrypt_handle;
113     int (*memcrypt_encrypt_data)(void *handle, uint8_t *ptr, uint64_t len);
114 };
115 
116 KVMState *kvm_state;
117 bool kvm_kernel_irqchip;
118 bool kvm_split_irqchip;
119 bool kvm_async_interrupts_allowed;
120 bool kvm_halt_in_kernel_allowed;
121 bool kvm_eventfds_allowed;
122 bool kvm_irqfds_allowed;
123 bool kvm_resamplefds_allowed;
124 bool kvm_msi_via_irqfd_allowed;
125 bool kvm_gsi_routing_allowed;
126 bool kvm_gsi_direct_mapping;
127 bool kvm_allowed;
128 bool kvm_readonly_mem_allowed;
129 bool kvm_vm_attributes_allowed;
130 bool kvm_direct_msi_allowed;
131 bool kvm_ioeventfd_any_length_allowed;
132 bool kvm_msi_use_devid;
133 static bool kvm_immediate_exit;
134 
135 static const KVMCapabilityInfo kvm_required_capabilites[] = {
136     KVM_CAP_INFO(USER_MEMORY),
137     KVM_CAP_INFO(DESTROY_MEMORY_REGION_WORKS),
138     KVM_CAP_INFO(JOIN_MEMORY_REGIONS_WORKS),
139     KVM_CAP_LAST_INFO
140 };
141 
142 #define kvm_slots_lock(kml)      qemu_mutex_lock(&(kml)->slots_lock)
143 #define kvm_slots_unlock(kml)    qemu_mutex_unlock(&(kml)->slots_lock)
144 
145 int kvm_get_max_memslots(void)
146 {
147     KVMState *s = KVM_STATE(current_machine->accelerator);
148 
149     return s->nr_slots;
150 }
151 
152 bool kvm_memcrypt_enabled(void)
153 {
154     if (kvm_state && kvm_state->memcrypt_handle) {
155         return true;
156     }
157 
158     return false;
159 }
160 
161 int kvm_memcrypt_encrypt_data(uint8_t *ptr, uint64_t len)
162 {
163     if (kvm_state->memcrypt_handle &&
164         kvm_state->memcrypt_encrypt_data) {
165         return kvm_state->memcrypt_encrypt_data(kvm_state->memcrypt_handle,
166                                               ptr, len);
167     }
168 
169     return 1;
170 }
171 
172 /* Called with KVMMemoryListener.slots_lock held */
173 static KVMSlot *kvm_get_free_slot(KVMMemoryListener *kml)
174 {
175     KVMState *s = kvm_state;
176     int i;
177 
178     for (i = 0; i < s->nr_slots; i++) {
179         if (kml->slots[i].memory_size == 0) {
180             return &kml->slots[i];
181         }
182     }
183 
184     return NULL;
185 }
186 
187 bool kvm_has_free_slot(MachineState *ms)
188 {
189     KVMState *s = KVM_STATE(ms->accelerator);
190     bool result;
191     KVMMemoryListener *kml = &s->memory_listener;
192 
193     kvm_slots_lock(kml);
194     result = !!kvm_get_free_slot(kml);
195     kvm_slots_unlock(kml);
196 
197     return result;
198 }
199 
200 /* Called with KVMMemoryListener.slots_lock held */
201 static KVMSlot *kvm_alloc_slot(KVMMemoryListener *kml)
202 {
203     KVMSlot *slot = kvm_get_free_slot(kml);
204 
205     if (slot) {
206         return slot;
207     }
208 
209     fprintf(stderr, "%s: no free slot available\n", __func__);
210     abort();
211 }
212 
213 static KVMSlot *kvm_lookup_matching_slot(KVMMemoryListener *kml,
214                                          hwaddr start_addr,
215                                          hwaddr size)
216 {
217     KVMState *s = kvm_state;
218     int i;
219 
220     for (i = 0; i < s->nr_slots; i++) {
221         KVMSlot *mem = &kml->slots[i];
222 
223         if (start_addr == mem->start_addr && size == mem->memory_size) {
224             return mem;
225         }
226     }
227 
228     return NULL;
229 }
230 
231 /*
232  * Calculate and align the start address and the size of the section.
233  * Return the size. If the size is 0, the aligned section is empty.
234  */
235 static hwaddr kvm_align_section(MemoryRegionSection *section,
236                                 hwaddr *start)
237 {
238     hwaddr size = int128_get64(section->size);
239     hwaddr delta, aligned;
240 
241     /* kvm works in page size chunks, but the function may be called
242        with a sub-page size and an unaligned start address. Round the start
243        address up to the next page boundary and truncate the size down to the previous one. */
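    /*
     * For illustration, assuming a 4 KiB host page size: a section at
     * address 0x1234 with size 0x3000 gives aligned = 0x2000 and
     * delta = 0xdcc, so we return (0x3000 - 0xdcc) & ~0xfff = 0x2000,
     * i.e. only the fully covered pages [0x2000, 0x4000).
     */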
244     aligned = ROUND_UP(section->offset_within_address_space,
245                        qemu_real_host_page_size);
246     delta = aligned - section->offset_within_address_space;
247     *start = aligned;
248     if (delta > size) {
249         return 0;
250     }
251 
252     return (size - delta) & qemu_real_host_page_mask;
253 }
254 
255 int kvm_physical_memory_addr_from_host(KVMState *s, void *ram,
256                                        hwaddr *phys_addr)
257 {
258     KVMMemoryListener *kml = &s->memory_listener;
259     int i, ret = 0;
260 
261     kvm_slots_lock(kml);
262     for (i = 0; i < s->nr_slots; i++) {
263         KVMSlot *mem = &kml->slots[i];
264 
265         if (ram >= mem->ram && ram < mem->ram + mem->memory_size) {
266             *phys_addr = mem->start_addr + (ram - mem->ram);
267             ret = 1;
268             break;
269         }
270     }
271     kvm_slots_unlock(kml);
272 
273     return ret;
274 }
275 
276 static int kvm_set_user_memory_region(KVMMemoryListener *kml, KVMSlot *slot, bool new)
277 {
278     KVMState *s = kvm_state;
279     struct kvm_userspace_memory_region mem;
280     int ret;
281 
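    /*
     * The address space id is carried in the high 16 bits of the slot
     * number (cf. KVM_CAP_MULTI_ADDRESS_SPACE), so one memory region
     * ioctl interface serves every KVMMemoryListener.
     */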
282     mem.slot = slot->slot | (kml->as_id << 16);
283     mem.guest_phys_addr = slot->start_addr;
284     mem.userspace_addr = (unsigned long)slot->ram;
285     mem.flags = slot->flags;
286 
287     if (slot->memory_size && !new && (mem.flags ^ slot->old_flags) & KVM_MEM_READONLY) {
288         /* Set the slot size to 0 before setting the slot to the desired
289          * value; this is required since KVM commit 75d61fbc. */
290         mem.memory_size = 0;
291         kvm_vm_ioctl(s, KVM_SET_USER_MEMORY_REGION, &mem);
292     }
293     mem.memory_size = slot->memory_size;
294     ret = kvm_vm_ioctl(s, KVM_SET_USER_MEMORY_REGION, &mem);
295     slot->old_flags = mem.flags;
296     trace_kvm_set_user_memory(mem.slot, mem.flags, mem.guest_phys_addr,
297                               mem.memory_size, mem.userspace_addr, ret);
298     return ret;
299 }
300 
301 int kvm_destroy_vcpu(CPUState *cpu)
302 {
303     KVMState *s = kvm_state;
304     long mmap_size;
305     struct KVMParkedVcpu *vcpu = NULL;
306     int ret = 0;
307 
308     DPRINTF("kvm_destroy_vcpu\n");
309 
310     ret = kvm_arch_destroy_vcpu(cpu);
311     if (ret < 0) {
312         goto err;
313     }
314 
315     mmap_size = kvm_ioctl(s, KVM_GET_VCPU_MMAP_SIZE, 0);
316     if (mmap_size < 0) {
317         ret = mmap_size;
318         DPRINTF("KVM_GET_VCPU_MMAP_SIZE failed\n");
319         goto err;
320     }
321 
322     ret = munmap(cpu->kvm_run, mmap_size);
323     if (ret < 0) {
324         goto err;
325     }
326 
327     vcpu = g_malloc0(sizeof(*vcpu));
328     vcpu->vcpu_id = kvm_arch_vcpu_id(cpu);
329     vcpu->kvm_fd = cpu->kvm_fd;
330     QLIST_INSERT_HEAD(&kvm_state->kvm_parked_vcpus, vcpu, node);
331 err:
332     return ret;
333 }
334 
335 static int kvm_get_vcpu(KVMState *s, unsigned long vcpu_id)
336 {
337     struct KVMParkedVcpu *cpu;
338 
339     QLIST_FOREACH(cpu, &s->kvm_parked_vcpus, node) {
340         if (cpu->vcpu_id == vcpu_id) {
341             int kvm_fd;
342 
343             QLIST_REMOVE(cpu, node);
344             kvm_fd = cpu->kvm_fd;
345             g_free(cpu);
346             return kvm_fd;
347         }
348     }
349 
350     return kvm_vm_ioctl(s, KVM_CREATE_VCPU, (void *)vcpu_id);
351 }
352 
353 int kvm_init_vcpu(CPUState *cpu)
354 {
355     KVMState *s = kvm_state;
356     long mmap_size;
357     int ret;
358 
359     DPRINTF("kvm_init_vcpu\n");
360 
361     ret = kvm_get_vcpu(s, kvm_arch_vcpu_id(cpu));
362     if (ret < 0) {
363         DPRINTF("kvm_create_vcpu failed\n");
364         goto err;
365     }
366 
367     cpu->kvm_fd = ret;
368     cpu->kvm_state = s;
369     cpu->vcpu_dirty = true;
370 
371     mmap_size = kvm_ioctl(s, KVM_GET_VCPU_MMAP_SIZE, 0);
372     if (mmap_size < 0) {
373         ret = mmap_size;
374         DPRINTF("KVM_GET_VCPU_MMAP_SIZE failed\n");
375         goto err;
376     }
377 
378     cpu->kvm_run = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE, MAP_SHARED,
379                         cpu->kvm_fd, 0);
380     if (cpu->kvm_run == MAP_FAILED) {
381         ret = -errno;
382         DPRINTF("mmap'ing vcpu state failed\n");
383         goto err;
384     }
385 
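    /*
     * s->coalesced_mmio holds the offset, in host pages, of the coalesced
     * MMIO ring within the vcpu mmap area, as reported by
     * KVM_CHECK_EXTENSION(KVM_CAP_COALESCED_MMIO) at init time.
     */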
386     if (s->coalesced_mmio && !s->coalesced_mmio_ring) {
387         s->coalesced_mmio_ring =
388             (void *)cpu->kvm_run + s->coalesced_mmio * PAGE_SIZE;
389     }
390 
391     ret = kvm_arch_init_vcpu(cpu);
392 err:
393     return ret;
394 }
395 
396 /*
397  * dirty pages logging control
398  */
399 
400 static int kvm_mem_flags(MemoryRegion *mr)
401 {
402     bool readonly = mr->readonly || memory_region_is_romd(mr);
403     int flags = 0;
404 
405     if (memory_region_get_dirty_log_mask(mr) != 0) {
406         flags |= KVM_MEM_LOG_DIRTY_PAGES;
407     }
408     if (readonly && kvm_readonly_mem_allowed) {
409         flags |= KVM_MEM_READONLY;
410     }
411     return flags;
412 }
413 
414 /* Called with KVMMemoryListener.slots_lock held */
415 static int kvm_slot_update_flags(KVMMemoryListener *kml, KVMSlot *mem,
416                                  MemoryRegion *mr)
417 {
418     mem->flags = kvm_mem_flags(mr);
419 
420     /* If nothing effectively changed, there is no need to issue the ioctl */
421     if (mem->flags == mem->old_flags) {
422         return 0;
423     }
424 
425     return kvm_set_user_memory_region(kml, mem, false);
426 }
427 
428 static int kvm_section_update_flags(KVMMemoryListener *kml,
429                                     MemoryRegionSection *section)
430 {
431     hwaddr start_addr, size;
432     KVMSlot *mem;
433     int ret = 0;
434 
435     size = kvm_align_section(section, &start_addr);
436     if (!size) {
437         return 0;
438     }
439 
440     kvm_slots_lock(kml);
441 
442     mem = kvm_lookup_matching_slot(kml, start_addr, size);
443     if (!mem) {
444         /* We don't have a slot if we want to trap every access. */
445         goto out;
446     }
447 
448     ret = kvm_slot_update_flags(kml, mem, section->mr);
449 
450 out:
451     kvm_slots_unlock(kml);
452     return ret;
453 }
454 
455 static void kvm_log_start(MemoryListener *listener,
456                           MemoryRegionSection *section,
457                           int old, int new)
458 {
459     KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);
460     int r;
461 
462     if (old != 0) {
463         return;
464     }
465 
466     r = kvm_section_update_flags(kml, section);
467     if (r < 0) {
468         abort();
469     }
470 }
471 
472 static void kvm_log_stop(MemoryListener *listener,
473                           MemoryRegionSection *section,
474                           int old, int new)
475 {
476     KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);
477     int r;
478 
479     if (new != 0) {
480         return;
481     }
482 
483     r = kvm_section_update_flags(kml, section);
484     if (r < 0) {
485         abort();
486     }
487 }
488 
489 /* Get KVM's dirty-page bitmap and update QEMU's copy */
490 static int kvm_get_dirty_pages_log_range(MemoryRegionSection *section,
491                                          unsigned long *bitmap)
492 {
493     ram_addr_t start = section->offset_within_region +
494                        memory_region_get_ram_addr(section->mr);
495     ram_addr_t pages = int128_get64(section->size) / getpagesize();
496 
497     cpu_physical_memory_set_dirty_lebitmap(bitmap, start, pages);
498     return 0;
499 }
500 
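/* Round x up to the next multiple of y (y must be a power of two),
 * e.g. ALIGN(100, 64) == 128. */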
501 #define ALIGN(x, y)  (((x)+(y)-1) & ~((y)-1))
502 
503 /**
504  * kvm_physical_sync_dirty_bitmap - Sync dirty bitmap from kernel space
505  *
506  * This function first tries to fetch the dirty bitmap from the kernel
507  * and then updates QEMU's dirty bitmap.
508  *
509  * NOTE: the caller must hold kml->slots_lock.
510  *
511  * @kml: the KVM memory listener object
512  * @section: the memory section to sync the dirty bitmap with
513  */
514 static int kvm_physical_sync_dirty_bitmap(KVMMemoryListener *kml,
515                                           MemoryRegionSection *section)
516 {
517     KVMState *s = kvm_state;
518     struct kvm_dirty_log d = {};
519     KVMSlot *mem;
520     hwaddr start_addr, size;
521     int ret = 0;
522 
523     size = kvm_align_section(section, &start_addr);
524     if (size) {
525         mem = kvm_lookup_matching_slot(kml, start_addr, size);
526         if (!mem) {
527             /* We don't have a slot if we want to trap every access. */
528             goto out;
529         }
530 
531         /* XXX bad kernel interface alert
532          * For the dirty bitmap, the kernel allocates an array whose size is
533          * aligned to bits-per-long.  But when the kernel is 64-bit and
534          * userspace is 32-bit, userspace cannot align to the same
535          * bits-per-long, since sizeof(long) differs between kernel and
536          * user space.  Userspace would then provide a buffer that may be
537          * 4 bytes smaller than the one the kernel uses, resulting in
538          * userspace memory corruption (which, in most cases, is not even
539          * detectable by valgrind).
540          * So for now, align to 64 instead of HOST_LONG_BITS here, in the
541          * hope that sizeof(long) won't become >8 any time soon.
542          */
543         size = ALIGN(((mem->memory_size) >> TARGET_PAGE_BITS),
544                      /*HOST_LONG_BITS*/ 64) / 8;
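        /*
         * For example, a 1 GiB slot with 4 KiB target pages covers 262144
         * pages, which is already a multiple of 64, so the bitmap needs
         * 262144 / 8 = 32 KiB.
         */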
545         if (!mem->dirty_bmap) {
546             /* Allocate on the first log_sync, once and for all */
547             mem->dirty_bmap = g_malloc0(size);
548         }
549 
550         d.dirty_bitmap = mem->dirty_bmap;
551         d.slot = mem->slot | (kml->as_id << 16);
552         if (kvm_vm_ioctl(s, KVM_GET_DIRTY_LOG, &d) == -1) {
553             DPRINTF("ioctl failed %d\n", errno);
554             ret = -1;
555             goto out;
556         }
557 
558         kvm_get_dirty_pages_log_range(section, d.dirty_bitmap);
559     }
560 out:
561     return ret;
562 }
563 
564 /* Alignment requirement for KVM_CLEAR_DIRTY_LOG - 64 pages */
565 #define KVM_CLEAR_LOG_SHIFT  6
566 #define KVM_CLEAR_LOG_ALIGN  (qemu_real_host_page_size << KVM_CLEAR_LOG_SHIFT)
567 #define KVM_CLEAR_LOG_MASK   (-KVM_CLEAR_LOG_ALIGN)
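/*
 * With a 4 KiB host page size KVM_CLEAR_LOG_ALIGN is 256 KiB: the start of
 * the range handed to KVM_CLEAR_DIRTY_LOG is aligned down, and its size
 * aligned up (or clamped to the end of the memslot), to 64-page multiples.
 */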
568 
569 /**
570  * kvm_physical_log_clear - Clear the kernel's dirty bitmap for range
571  *
572  * NOTE: this will be a no-op if we haven't enabled manual dirty log
573  * protection in the host kernel because in that case this operation
574  * will be done within log_sync().
575  *
576  * @kml:     the kvm memory listener
577  * @section: the memory range to clear dirty bitmap
578  */
579 static int kvm_physical_log_clear(KVMMemoryListener *kml,
580                                   MemoryRegionSection *section)
581 {
582     KVMState *s = kvm_state;
583     struct kvm_clear_dirty_log d;
584     uint64_t start, end, bmap_start, start_delta, bmap_npages, size;
585     unsigned long *bmap_clear = NULL, psize = qemu_real_host_page_size;
586     KVMSlot *mem = NULL;
587     int ret, i;
588 
589     if (!s->manual_dirty_log_protect) {
590         /* No need to do explicit clear */
591         return 0;
592     }
593 
594     start = section->offset_within_address_space;
595     size = int128_get64(section->size);
596 
597     if (!size) {
598         /* Nothing more we can do... */
599         return 0;
600     }
601 
602     kvm_slots_lock(kml);
603 
604     /* Find any possible slot that covers the section */
605     for (i = 0; i < s->nr_slots; i++) {
606         mem = &kml->slots[i];
607         if (mem->start_addr <= start &&
608             start + size <= mem->start_addr + mem->memory_size) {
609             break;
610         }
611     }
612 
613     /*
614      * We should always have found a memslot by this point; otherwise
615      * something is wrong in the upper layers.
616      */
617     assert(mem && i != s->nr_slots);
618 
619     /*
620      * We need to extend either the start or the size or both to
621      * satisfy the KVM interface requirements.  First, align the start
622      * address down to a 64-host-page boundary.
623      */
624     bmap_start = (start - mem->start_addr) & KVM_CLEAR_LOG_MASK;
625     start_delta = start - mem->start_addr - bmap_start;
626     bmap_start /= psize;
627 
628     /*
629      * The kernel interface has restriction on the size too, that either:
630      *
631      * (1) the size is 64 host pages aligned (just like the start), or
632      * (2) the size fills up until the end of the KVM memslot.
633      */
634     bmap_npages = DIV_ROUND_UP(size + start_delta, KVM_CLEAR_LOG_ALIGN)
635         << KVM_CLEAR_LOG_SHIFT;
636     end = mem->memory_size / psize;
637     if (bmap_npages > end - bmap_start) {
638         bmap_npages = end - bmap_start;
639     }
640     start_delta /= psize;
641 
642     /*
643      * Prepare the bitmap to clear dirty bits.  Here we must guarantee
644      * that we won't clear any unknown dirty bits; otherwise we might
645      * accidentally clear bits that have not yet been synced from the
646      * kernel into QEMU's bitmap, and then we would lose track of guest
647      * modifications to those pages (which can directly lead to guest
648      * data loss or a guest panic after migration).
649      *
650      * Layout of the KVMSlot.dirty_bmap:
651      *
652      *                   |<-------- bmap_npages -----------..>|
653      *                                                     [1]
654      *                     start_delta         size
655      *  |----------------|-------------|------------------|------------|
656      *  ^                ^             ^                               ^
657      *  |                |             |                               |
658      * start          bmap_start     (start)                         end
659      * of memslot                                             of memslot
660      *
661      * [1] bmap_npages can be aligned to either 64 pages or the end of slot
662      */
663 
664     assert(bmap_start % BITS_PER_LONG == 0);
665     /* We should never do log_clear before log_sync */
666     assert(mem->dirty_bmap);
667     if (start_delta) {
668         /* Slow path - we need to manipulate a temp bitmap */
669         bmap_clear = bitmap_new(bmap_npages);
670         bitmap_copy_with_src_offset(bmap_clear, mem->dirty_bmap,
671                                     bmap_start, start_delta + size / psize);
672         /*
673          * We need to clear the leading bits because the caller did not
674          * request them; they only exist because we extended the range
675          * for 64-page alignment.
676          */
677         bitmap_clear(bmap_clear, 0, start_delta);
678         d.dirty_bitmap = bmap_clear;
679     } else {
680         /* Fast path - start address aligns well with BITS_PER_LONG */
681         d.dirty_bitmap = mem->dirty_bmap + BIT_WORD(bmap_start);
682     }
683 
684     d.first_page = bmap_start;
685     /* It should never overflow.  If it happens, say something */
686     assert(bmap_npages <= UINT32_MAX);
687     d.num_pages = bmap_npages;
688     d.slot = mem->slot | (kml->as_id << 16);
689 
690     if (kvm_vm_ioctl(s, KVM_CLEAR_DIRTY_LOG, &d) == -1) {
691         ret = -errno;
692         error_report("%s: KVM_CLEAR_DIRTY_LOG failed, slot=%d, "
693                      "start=0x%"PRIx64", size=0x%"PRIx32", errno=%d",
694                      __func__, d.slot, (uint64_t)d.first_page,
695                      (uint32_t)d.num_pages, ret);
696     } else {
697         ret = 0;
698         trace_kvm_clear_dirty_log(d.slot, d.first_page, d.num_pages);
699     }
700 
701     /*
702      * After we have updated the remote dirty bitmap, we also update the
703      * memslot's cached bitmap, so that if another user clears the same
704      * region we know not to clear it again on the remote side, which
705      * would likewise lose dirty information.
706      */
707     bitmap_clear(mem->dirty_bmap, bmap_start + start_delta,
708                  size / psize);
709     /* This handles the NULL case well */
710     g_free(bmap_clear);
711 
712     kvm_slots_unlock(kml);
713 
714     return ret;
715 }
716 
717 static void kvm_coalesce_mmio_region(MemoryListener *listener,
718                                      MemoryRegionSection *section,
719                                      hwaddr start, hwaddr size)
720 {
721     KVMState *s = kvm_state;
722 
723     if (s->coalesced_mmio) {
724         struct kvm_coalesced_mmio_zone zone;
725 
726         zone.addr = start;
727         zone.size = size;
728         zone.pad = 0;
729 
730         (void)kvm_vm_ioctl(s, KVM_REGISTER_COALESCED_MMIO, &zone);
731     }
732 }
733 
734 static void kvm_uncoalesce_mmio_region(MemoryListener *listener,
735                                        MemoryRegionSection *section,
736                                        hwaddr start, hwaddr size)
737 {
738     KVMState *s = kvm_state;
739 
740     if (s->coalesced_mmio) {
741         struct kvm_coalesced_mmio_zone zone;
742 
743         zone.addr = start;
744         zone.size = size;
745         zone.pad = 0;
746 
747         (void)kvm_vm_ioctl(s, KVM_UNREGISTER_COALESCED_MMIO, &zone);
748     }
749 }
750 
751 static void kvm_coalesce_pio_add(MemoryListener *listener,
752                                 MemoryRegionSection *section,
753                                 hwaddr start, hwaddr size)
754 {
755     KVMState *s = kvm_state;
756 
757     if (s->coalesced_pio) {
758         struct kvm_coalesced_mmio_zone zone;
759 
760         zone.addr = start;
761         zone.size = size;
762         zone.pio = 1;
763 
764         (void)kvm_vm_ioctl(s, KVM_REGISTER_COALESCED_MMIO, &zone);
765     }
766 }
767 
768 static void kvm_coalesce_pio_del(MemoryListener *listener,
769                                 MemoryRegionSection *section,
770                                 hwaddr start, hwaddr size)
771 {
772     KVMState *s = kvm_state;
773 
774     if (s->coalesced_pio) {
775         struct kvm_coalesced_mmio_zone zone;
776 
777         zone.addr = start;
778         zone.size = size;
779         zone.pio = 1;
780 
781         (void)kvm_vm_ioctl(s, KVM_UNREGISTER_COALESCED_MMIO, &zone);
782     }
783 }
784 
785 static MemoryListener kvm_coalesced_pio_listener = {
786     .coalesced_io_add = kvm_coalesce_pio_add,
787     .coalesced_io_del = kvm_coalesce_pio_del,
788 };
789 
790 int kvm_check_extension(KVMState *s, unsigned int extension)
791 {
792     int ret;
793 
794     ret = kvm_ioctl(s, KVM_CHECK_EXTENSION, extension);
795     if (ret < 0) {
796         ret = 0;
797     }
798 
799     return ret;
800 }
801 
802 int kvm_vm_check_extension(KVMState *s, unsigned int extension)
803 {
804     int ret;
805 
806     ret = kvm_vm_ioctl(s, KVM_CHECK_EXTENSION, extension);
807     if (ret < 0) {
808         /* VM wide version not implemented, use global one instead */
809         ret = kvm_check_extension(s, extension);
810     }
811 
812     return ret;
813 }
814 
815 static uint32_t adjust_ioeventfd_endianness(uint32_t val, uint32_t size)
816 {
817 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
818     /* The kernel expects ioeventfd values in host endianness (cf.
819      * HOST_WORDS_BIGENDIAN), but the memory core hands them to us in
820      * target endianness.  For example, PPC is always treated as big-endian
821      * even when running KVM on a ppc64le host.  Correct for that here.
822      */
823     switch (size) {
824     case 2:
825         val = bswap16(val);
826         break;
827     case 4:
828         val = bswap32(val);
829         break;
830     }
831 #endif
832     return val;
833 }
834 
835 static int kvm_set_ioeventfd_mmio(int fd, hwaddr addr, uint32_t val,
836                                   bool assign, uint32_t size, bool datamatch)
837 {
838     int ret;
839     struct kvm_ioeventfd iofd = {
840         .datamatch = datamatch ? adjust_ioeventfd_endianness(val, size) : 0,
841         .addr = addr,
842         .len = size,
843         .flags = 0,
844         .fd = fd,
845     };
846 
847     trace_kvm_set_ioeventfd_mmio(fd, (uint64_t)addr, val, assign, size,
848                                  datamatch);
849     if (!kvm_enabled()) {
850         return -ENOSYS;
851     }
852 
853     if (datamatch) {
854         iofd.flags |= KVM_IOEVENTFD_FLAG_DATAMATCH;
855     }
856     if (!assign) {
857         iofd.flags |= KVM_IOEVENTFD_FLAG_DEASSIGN;
858     }
859 
860     ret = kvm_vm_ioctl(kvm_state, KVM_IOEVENTFD, &iofd);
861 
862     if (ret < 0) {
863         return -errno;
864     }
865 
866     return 0;
867 }
868 
869 static int kvm_set_ioeventfd_pio(int fd, uint16_t addr, uint16_t val,
870                                  bool assign, uint32_t size, bool datamatch)
871 {
872     struct kvm_ioeventfd kick = {
873         .datamatch = datamatch ? adjust_ioeventfd_endianness(val, size) : 0,
874         .addr = addr,
875         .flags = KVM_IOEVENTFD_FLAG_PIO,
876         .len = size,
877         .fd = fd,
878     };
879     int r;
880     trace_kvm_set_ioeventfd_pio(fd, addr, val, assign, size, datamatch);
881     if (!kvm_enabled()) {
882         return -ENOSYS;
883     }
884     if (datamatch) {
885         kick.flags |= KVM_IOEVENTFD_FLAG_DATAMATCH;
886     }
887     if (!assign) {
888         kick.flags |= KVM_IOEVENTFD_FLAG_DEASSIGN;
889     }
890     r = kvm_vm_ioctl(kvm_state, KVM_IOEVENTFD, &kick);
891     if (r < 0) {
892         return r;
893     }
894     return 0;
895 }
896 
897 
898 static int kvm_check_many_ioeventfds(void)
899 {
900     /* Userspace can use ioeventfd for io notification.  This requires a host
901      * that supports eventfd(2) and an I/O thread; since eventfd does not
902      * support SIGIO it cannot interrupt the vcpu.
903      *
904      * Older kernels have a 6-device limit on the KVM I/O bus.  Find out
905      * whether that limit applies so we can avoid creating too many ioeventfds.
906      */
907 #if defined(CONFIG_EVENTFD)
908     int ioeventfds[7];
909     int i, ret = 0;
910     for (i = 0; i < ARRAY_SIZE(ioeventfds); i++) {
911         ioeventfds[i] = eventfd(0, EFD_CLOEXEC);
912         if (ioeventfds[i] < 0) {
913             break;
914         }
915         ret = kvm_set_ioeventfd_pio(ioeventfds[i], 0, i, true, 2, true);
916         if (ret < 0) {
917             close(ioeventfds[i]);
918             break;
919         }
920     }
921 
922     /* Decide whether many devices are supported or not */
923     ret = i == ARRAY_SIZE(ioeventfds);
924 
925     while (i-- > 0) {
926         kvm_set_ioeventfd_pio(ioeventfds[i], 0, i, false, 2, true);
927         close(ioeventfds[i]);
928     }
929     return ret;
930 #else
931     return 0;
932 #endif
933 }
934 
935 static const KVMCapabilityInfo *
936 kvm_check_extension_list(KVMState *s, const KVMCapabilityInfo *list)
937 {
938     while (list->name) {
939         if (!kvm_check_extension(s, list->value)) {
940             return list;
941         }
942         list++;
943     }
944     return NULL;
945 }
946 
947 static void kvm_set_phys_mem(KVMMemoryListener *kml,
948                              MemoryRegionSection *section, bool add)
949 {
950     KVMSlot *mem;
951     int err;
952     MemoryRegion *mr = section->mr;
953     bool writeable = !mr->readonly && !mr->rom_device;
954     hwaddr start_addr, size;
955     void *ram;
956 
957     if (!memory_region_is_ram(mr)) {
958         if (writeable || !kvm_readonly_mem_allowed) {
959             return;
960         } else if (!mr->romd_mode) {
961             /* If the memory device is not in romd_mode, then we actually want
962              * to remove the kvm memory slot so all accesses will trap. */
963             add = false;
964         }
965     }
966 
967     size = kvm_align_section(section, &start_addr);
968     if (!size) {
969         return;
970     }
971 
972     /* advance the ram pointer by the same delta used to align the start address */
973     ram = memory_region_get_ram_ptr(mr) + section->offset_within_region +
974           (start_addr - section->offset_within_address_space);
975 
976     kvm_slots_lock(kml);
977 
978     if (!add) {
979         mem = kvm_lookup_matching_slot(kml, start_addr, size);
980         if (!mem) {
981             goto out;
982         }
983         if (mem->flags & KVM_MEM_LOG_DIRTY_PAGES) {
984             kvm_physical_sync_dirty_bitmap(kml, section);
985         }
986 
987         /* unregister the slot */
988         g_free(mem->dirty_bmap);
989         mem->dirty_bmap = NULL;
990         mem->memory_size = 0;
991         mem->flags = 0;
992         err = kvm_set_user_memory_region(kml, mem, false);
993         if (err) {
994             fprintf(stderr, "%s: error unregistering slot: %s\n",
995                     __func__, strerror(-err));
996             abort();
997         }
998         goto out;
999     }
1000 
1001     /* register the new slot */
1002     mem = kvm_alloc_slot(kml);
1003     mem->memory_size = size;
1004     mem->start_addr = start_addr;
1005     mem->ram = ram;
1006     mem->flags = kvm_mem_flags(mr);
1007 
1008     err = kvm_set_user_memory_region(kml, mem, true);
1009     if (err) {
1010         fprintf(stderr, "%s: error registering slot: %s\n", __func__,
1011                 strerror(-err));
1012         abort();
1013     }
1014 
1015 out:
1016     kvm_slots_unlock(kml);
1017 }
1018 
1019 static void kvm_region_add(MemoryListener *listener,
1020                            MemoryRegionSection *section)
1021 {
1022     KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);
1023 
1024     memory_region_ref(section->mr);
1025     kvm_set_phys_mem(kml, section, true);
1026 }
1027 
1028 static void kvm_region_del(MemoryListener *listener,
1029                            MemoryRegionSection *section)
1030 {
1031     KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);
1032 
1033     kvm_set_phys_mem(kml, section, false);
1034     memory_region_unref(section->mr);
1035 }
1036 
1037 static void kvm_log_sync(MemoryListener *listener,
1038                          MemoryRegionSection *section)
1039 {
1040     KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);
1041     int r;
1042 
1043     kvm_slots_lock(kml);
1044     r = kvm_physical_sync_dirty_bitmap(kml, section);
1045     kvm_slots_unlock(kml);
1046     if (r < 0) {
1047         abort();
1048     }
1049 }
1050 
1051 static void kvm_log_clear(MemoryListener *listener,
1052                           MemoryRegionSection *section)
1053 {
1054     KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);
1055     int r;
1056 
1057     r = kvm_physical_log_clear(kml, section);
1058     if (r < 0) {
1059         error_report_once("%s: kvm log clear failed: mr=%s "
1060                           "offset=%"HWADDR_PRIx" size=%"PRIx64, __func__,
1061                           section->mr->name, section->offset_within_region,
1062                           int128_get64(section->size));
1063         abort();
1064     }
1065 }
1066 
1067 static void kvm_mem_ioeventfd_add(MemoryListener *listener,
1068                                   MemoryRegionSection *section,
1069                                   bool match_data, uint64_t data,
1070                                   EventNotifier *e)
1071 {
1072     int fd = event_notifier_get_fd(e);
1073     int r;
1074 
1075     r = kvm_set_ioeventfd_mmio(fd, section->offset_within_address_space,
1076                                data, true, int128_get64(section->size),
1077                                match_data);
1078     if (r < 0) {
1079         fprintf(stderr, "%s: error adding ioeventfd: %s (%d)\n",
1080                 __func__, strerror(-r), -r);
1081         abort();
1082     }
1083 }
1084 
1085 static void kvm_mem_ioeventfd_del(MemoryListener *listener,
1086                                   MemoryRegionSection *section,
1087                                   bool match_data, uint64_t data,
1088                                   EventNotifier *e)
1089 {
1090     int fd = event_notifier_get_fd(e);
1091     int r;
1092 
1093     r = kvm_set_ioeventfd_mmio(fd, section->offset_within_address_space,
1094                                data, false, int128_get64(section->size),
1095                                match_data);
1096     if (r < 0) {
1097         fprintf(stderr, "%s: error deleting ioeventfd: %s (%d)\n",
1098                 __func__, strerror(-r), -r);
1099         abort();
1100     }
1101 }
1102 
1103 static void kvm_io_ioeventfd_add(MemoryListener *listener,
1104                                  MemoryRegionSection *section,
1105                                  bool match_data, uint64_t data,
1106                                  EventNotifier *e)
1107 {
1108     int fd = event_notifier_get_fd(e);
1109     int r;
1110 
1111     r = kvm_set_ioeventfd_pio(fd, section->offset_within_address_space,
1112                               data, true, int128_get64(section->size),
1113                               match_data);
1114     if (r < 0) {
1115         fprintf(stderr, "%s: error adding ioeventfd: %s (%d)\n",
1116                 __func__, strerror(-r), -r);
1117         abort();
1118     }
1119 }
1120 
1121 static void kvm_io_ioeventfd_del(MemoryListener *listener,
1122                                  MemoryRegionSection *section,
1123                                  bool match_data, uint64_t data,
1124                                  EventNotifier *e)
1125 
1126 {
1127     int fd = event_notifier_get_fd(e);
1128     int r;
1129 
1130     r = kvm_set_ioeventfd_pio(fd, section->offset_within_address_space,
1131                               data, false, int128_get64(section->size),
1132                               match_data);
1133     if (r < 0) {
1134         fprintf(stderr, "%s: error deleting ioeventfd: %s (%d)\n",
1135                 __func__, strerror(-r), -r);
1136         abort();
1137     }
1138 }
1139 
1140 void kvm_memory_listener_register(KVMState *s, KVMMemoryListener *kml,
1141                                   AddressSpace *as, int as_id)
1142 {
1143     int i;
1144 
1145     qemu_mutex_init(&kml->slots_lock);
1146     kml->slots = g_malloc0(s->nr_slots * sizeof(KVMSlot));
1147     kml->as_id = as_id;
1148 
1149     for (i = 0; i < s->nr_slots; i++) {
1150         kml->slots[i].slot = i;
1151     }
1152 
1153     kml->listener.region_add = kvm_region_add;
1154     kml->listener.region_del = kvm_region_del;
1155     kml->listener.log_start = kvm_log_start;
1156     kml->listener.log_stop = kvm_log_stop;
1157     kml->listener.log_sync = kvm_log_sync;
1158     kml->listener.log_clear = kvm_log_clear;
1159     kml->listener.priority = 10;
1160 
1161     memory_listener_register(&kml->listener, as);
1162 }
1163 
1164 static MemoryListener kvm_io_listener = {
1165     .eventfd_add = kvm_io_ioeventfd_add,
1166     .eventfd_del = kvm_io_ioeventfd_del,
1167     .priority = 10,
1168 };
1169 
1170 int kvm_set_irq(KVMState *s, int irq, int level)
1171 {
1172     struct kvm_irq_level event;
1173     int ret;
1174 
1175     assert(kvm_async_interrupts_enabled());
1176 
1177     event.level = level;
1178     event.irq = irq;
1179     ret = kvm_vm_ioctl(s, s->irq_set_ioctl, &event);
1180     if (ret < 0) {
1181         perror("kvm_set_irq");
1182         abort();
1183     }
1184 
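    /* With KVM_IRQ_LINE_STATUS the kernel reports the injection status in
     * event.status (e.g. whether the interrupt was coalesced); plain
     * KVM_IRQ_LINE has no status field, so just report success. */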
1185     return (s->irq_set_ioctl == KVM_IRQ_LINE) ? 1 : event.status;
1186 }
1187 
1188 #ifdef KVM_CAP_IRQ_ROUTING
1189 typedef struct KVMMSIRoute {
1190     struct kvm_irq_routing_entry kroute;
1191     QTAILQ_ENTRY(KVMMSIRoute) entry;
1192 } KVMMSIRoute;
1193 
1194 static void set_gsi(KVMState *s, unsigned int gsi)
1195 {
1196     set_bit(gsi, s->used_gsi_bitmap);
1197 }
1198 
1199 static void clear_gsi(KVMState *s, unsigned int gsi)
1200 {
1201     clear_bit(gsi, s->used_gsi_bitmap);
1202 }
1203 
1204 void kvm_init_irq_routing(KVMState *s)
1205 {
1206     int gsi_count, i;
1207 
1208     gsi_count = kvm_check_extension(s, KVM_CAP_IRQ_ROUTING) - 1;
1209     if (gsi_count > 0) {
1210         /* Round up so we can search ints using ffs */
1211         s->used_gsi_bitmap = bitmap_new(gsi_count);
1212         s->gsi_count = gsi_count;
1213     }
1214 
1215     s->irq_routes = g_malloc0(sizeof(*s->irq_routes));
1216     s->nr_allocated_irq_routes = 0;
1217 
1218     if (!kvm_direct_msi_allowed) {
1219         for (i = 0; i < KVM_MSI_HASHTAB_SIZE; i++) {
1220             QTAILQ_INIT(&s->msi_hashtab[i]);
1221         }
1222     }
1223 
1224     kvm_arch_init_irq_routing(s);
1225 }
1226 
1227 void kvm_irqchip_commit_routes(KVMState *s)
1228 {
1229     int ret;
1230 
1231     if (kvm_gsi_direct_mapping()) {
1232         return;
1233     }
1234 
1235     if (!kvm_gsi_routing_enabled()) {
1236         return;
1237     }
1238 
1239     s->irq_routes->flags = 0;
1240     trace_kvm_irqchip_commit_routes();
1241     ret = kvm_vm_ioctl(s, KVM_SET_GSI_ROUTING, s->irq_routes);
1242     assert(ret == 0);
1243 }
1244 
1245 static void kvm_add_routing_entry(KVMState *s,
1246                                   struct kvm_irq_routing_entry *entry)
1247 {
1248     struct kvm_irq_routing_entry *new;
1249     int n, size;
1250 
1251     if (s->irq_routes->nr == s->nr_allocated_irq_routes) {
1252         n = s->nr_allocated_irq_routes * 2;
1253         if (n < 64) {
1254             n = 64;
1255         }
1256         size = sizeof(struct kvm_irq_routing);
1257         size += n * sizeof(*new);
1258         s->irq_routes = g_realloc(s->irq_routes, size);
1259         s->nr_allocated_irq_routes = n;
1260     }
1261     n = s->irq_routes->nr++;
1262     new = &s->irq_routes->entries[n];
1263 
1264     *new = *entry;
1265 
1266     set_gsi(s, entry->gsi);
1267 }
1268 
1269 static int kvm_update_routing_entry(KVMState *s,
1270                                     struct kvm_irq_routing_entry *new_entry)
1271 {
1272     struct kvm_irq_routing_entry *entry;
1273     int n;
1274 
1275     for (n = 0; n < s->irq_routes->nr; n++) {
1276         entry = &s->irq_routes->entries[n];
1277         if (entry->gsi != new_entry->gsi) {
1278             continue;
1279         }
1280 
1281         if (!memcmp(entry, new_entry, sizeof *entry)) {
1282             return 0;
1283         }
1284 
1285         *entry = *new_entry;
1286 
1287         return 0;
1288     }
1289 
1290     return -ESRCH;
1291 }
1292 
1293 void kvm_irqchip_add_irq_route(KVMState *s, int irq, int irqchip, int pin)
1294 {
1295     struct kvm_irq_routing_entry e = {};
1296 
1297     assert(pin < s->gsi_count);
1298 
1299     e.gsi = irq;
1300     e.type = KVM_IRQ_ROUTING_IRQCHIP;
1301     e.flags = 0;
1302     e.u.irqchip.irqchip = irqchip;
1303     e.u.irqchip.pin = pin;
1304     kvm_add_routing_entry(s, &e);
1305 }
1306 
1307 void kvm_irqchip_release_virq(KVMState *s, int virq)
1308 {
1309     struct kvm_irq_routing_entry *e;
1310     int i;
1311 
1312     if (kvm_gsi_direct_mapping()) {
1313         return;
1314     }
1315 
1316     for (i = 0; i < s->irq_routes->nr; i++) {
1317         e = &s->irq_routes->entries[i];
1318         if (e->gsi == virq) {
1319             s->irq_routes->nr--;
1320             *e = s->irq_routes->entries[s->irq_routes->nr];
1321         }
1322     }
1323     clear_gsi(s, virq);
1324     kvm_arch_release_virq_post(virq);
1325     trace_kvm_irqchip_release_virq(virq);
1326 }
1327 
1328 static unsigned int kvm_hash_msi(uint32_t data)
1329 {
1330     /* This is optimized for IA32 MSI layout. However, no other arch shall
1331      * repeat the mistake of not providing a direct MSI injection API. */
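    /* On x86 the low byte of the MSI data word holds the interrupt vector. */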
1332     return data & 0xff;
1333 }
1334 
1335 static void kvm_flush_dynamic_msi_routes(KVMState *s)
1336 {
1337     KVMMSIRoute *route, *next;
1338     unsigned int hash;
1339 
1340     for (hash = 0; hash < KVM_MSI_HASHTAB_SIZE; hash++) {
1341         QTAILQ_FOREACH_SAFE(route, &s->msi_hashtab[hash], entry, next) {
1342             kvm_irqchip_release_virq(s, route->kroute.gsi);
1343             QTAILQ_REMOVE(&s->msi_hashtab[hash], route, entry);
1344             g_free(route);
1345         }
1346     }
1347 }
1348 
1349 static int kvm_irqchip_get_virq(KVMState *s)
1350 {
1351     int next_virq;
1352 
1353     /*
1354      * PIC and IOAPIC share the first 16 GSI numbers, so there are more
1355      * available GSI numbers than IRQ routes.  Allocating a GSI number can
1356      * therefore succeed even though a new route entry cannot be added.
1357      * When this happens, flush the dynamic MSI entries to free IRQ route entries.
1358      */
1359     if (!kvm_direct_msi_allowed && s->irq_routes->nr == s->gsi_count) {
1360         kvm_flush_dynamic_msi_routes(s);
1361     }
1362 
1363     /* Return the lowest unused GSI in the bitmap */
1364     next_virq = find_first_zero_bit(s->used_gsi_bitmap, s->gsi_count);
1365     if (next_virq >= s->gsi_count) {
1366         return -ENOSPC;
1367     } else {
1368         return next_virq;
1369     }
1370 }
1371 
1372 static KVMMSIRoute *kvm_lookup_msi_route(KVMState *s, MSIMessage msg)
1373 {
1374     unsigned int hash = kvm_hash_msi(msg.data);
1375     KVMMSIRoute *route;
1376 
1377     QTAILQ_FOREACH(route, &s->msi_hashtab[hash], entry) {
1378         if (route->kroute.u.msi.address_lo == (uint32_t)msg.address &&
1379             route->kroute.u.msi.address_hi == (msg.address >> 32) &&
1380             route->kroute.u.msi.data == le32_to_cpu(msg.data)) {
1381             return route;
1382         }
1383     }
1384     return NULL;
1385 }
1386 
1387 int kvm_irqchip_send_msi(KVMState *s, MSIMessage msg)
1388 {
1389     struct kvm_msi msi;
1390     KVMMSIRoute *route;
1391 
1392     if (kvm_direct_msi_allowed) {
1393         msi.address_lo = (uint32_t)msg.address;
1394         msi.address_hi = msg.address >> 32;
1395         msi.data = le32_to_cpu(msg.data);
1396         msi.flags = 0;
1397         memset(msi.pad, 0, sizeof(msi.pad));
1398 
1399         return kvm_vm_ioctl(s, KVM_SIGNAL_MSI, &msi);
1400     }
1401 
1402     route = kvm_lookup_msi_route(s, msg);
1403     if (!route) {
1404         int virq;
1405 
1406         virq = kvm_irqchip_get_virq(s);
1407         if (virq < 0) {
1408             return virq;
1409         }
1410 
1411         route = g_malloc0(sizeof(KVMMSIRoute));
1412         route->kroute.gsi = virq;
1413         route->kroute.type = KVM_IRQ_ROUTING_MSI;
1414         route->kroute.flags = 0;
1415         route->kroute.u.msi.address_lo = (uint32_t)msg.address;
1416         route->kroute.u.msi.address_hi = msg.address >> 32;
1417         route->kroute.u.msi.data = le32_to_cpu(msg.data);
1418 
1419         kvm_add_routing_entry(s, &route->kroute);
1420         kvm_irqchip_commit_routes(s);
1421 
1422         QTAILQ_INSERT_TAIL(&s->msi_hashtab[kvm_hash_msi(msg.data)], route,
1423                            entry);
1424     }
1425 
1426     assert(route->kroute.type == KVM_IRQ_ROUTING_MSI);
1427 
1428     return kvm_set_irq(s, route->kroute.gsi, 1);
1429 }
1430 
1431 int kvm_irqchip_add_msi_route(KVMState *s, int vector, PCIDevice *dev)
1432 {
1433     struct kvm_irq_routing_entry kroute = {};
1434     int virq;
1435     MSIMessage msg = {0, 0};
1436 
1437     if (pci_available && dev) {
1438         msg = pci_get_msi_message(dev, vector);
1439     }
1440 
1441     if (kvm_gsi_direct_mapping()) {
1442         return kvm_arch_msi_data_to_gsi(msg.data);
1443     }
1444 
1445     if (!kvm_gsi_routing_enabled()) {
1446         return -ENOSYS;
1447     }
1448 
1449     virq = kvm_irqchip_get_virq(s);
1450     if (virq < 0) {
1451         return virq;
1452     }
1453 
1454     kroute.gsi = virq;
1455     kroute.type = KVM_IRQ_ROUTING_MSI;
1456     kroute.flags = 0;
1457     kroute.u.msi.address_lo = (uint32_t)msg.address;
1458     kroute.u.msi.address_hi = msg.address >> 32;
1459     kroute.u.msi.data = le32_to_cpu(msg.data);
1460     if (pci_available && kvm_msi_devid_required()) {
1461         kroute.flags = KVM_MSI_VALID_DEVID;
1462         kroute.u.msi.devid = pci_requester_id(dev);
1463     }
1464     if (kvm_arch_fixup_msi_route(&kroute, msg.address, msg.data, dev)) {
1465         kvm_irqchip_release_virq(s, virq);
1466         return -EINVAL;
1467     }
1468 
1469     trace_kvm_irqchip_add_msi_route(dev ? dev->name : (char *)"N/A",
1470                                     vector, virq);
1471 
1472     kvm_add_routing_entry(s, &kroute);
1473     kvm_arch_add_msi_route_post(&kroute, vector, dev);
1474     kvm_irqchip_commit_routes(s);
1475 
1476     return virq;
1477 }
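
/*
 * Typical caller pattern (a sketch; "notifier" and "pci_dev" are
 * illustrative): allocate a route for a vector, wire an eventfd to it,
 * and release the route when the vector is torn down:
 *
 *     virq = kvm_irqchip_add_msi_route(kvm_state, vector, pci_dev);
 *     if (virq >= 0) {
 *         kvm_irqchip_add_irqfd_notifier_gsi(kvm_state, &notifier, NULL, virq);
 *     }
 *     ...
 *     kvm_irqchip_remove_irqfd_notifier_gsi(kvm_state, &notifier, virq);
 *     kvm_irqchip_release_virq(kvm_state, virq);
 */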
1478 
1479 int kvm_irqchip_update_msi_route(KVMState *s, int virq, MSIMessage msg,
1480                                  PCIDevice *dev)
1481 {
1482     struct kvm_irq_routing_entry kroute = {};
1483 
1484     if (kvm_gsi_direct_mapping()) {
1485         return 0;
1486     }
1487 
1488     if (!kvm_irqchip_in_kernel()) {
1489         return -ENOSYS;
1490     }
1491 
1492     kroute.gsi = virq;
1493     kroute.type = KVM_IRQ_ROUTING_MSI;
1494     kroute.flags = 0;
1495     kroute.u.msi.address_lo = (uint32_t)msg.address;
1496     kroute.u.msi.address_hi = msg.address >> 32;
1497     kroute.u.msi.data = le32_to_cpu(msg.data);
1498     if (pci_available && kvm_msi_devid_required()) {
1499         kroute.flags = KVM_MSI_VALID_DEVID;
1500         kroute.u.msi.devid = pci_requester_id(dev);
1501     }
1502     if (kvm_arch_fixup_msi_route(&kroute, msg.address, msg.data, dev)) {
1503         return -EINVAL;
1504     }
1505 
1506     trace_kvm_irqchip_update_msi_route(virq);
1507 
1508     return kvm_update_routing_entry(s, &kroute);
1509 }
1510 
1511 static int kvm_irqchip_assign_irqfd(KVMState *s, int fd, int rfd, int virq,
1512                                     bool assign)
1513 {
1514     struct kvm_irqfd irqfd = {
1515         .fd = fd,
1516         .gsi = virq,
1517         .flags = assign ? 0 : KVM_IRQFD_FLAG_DEASSIGN,
1518     };
1519 
1520     if (rfd != -1) {
1521         irqfd.flags |= KVM_IRQFD_FLAG_RESAMPLE;
1522         irqfd.resamplefd = rfd;
1523     }
1524 
1525     if (!kvm_irqfds_enabled()) {
1526         return -ENOSYS;
1527     }
1528 
1529     return kvm_vm_ioctl(s, KVM_IRQFD, &irqfd);
1530 }
1531 
1532 int kvm_irqchip_add_adapter_route(KVMState *s, AdapterInfo *adapter)
1533 {
1534     struct kvm_irq_routing_entry kroute = {};
1535     int virq;
1536 
1537     if (!kvm_gsi_routing_enabled()) {
1538         return -ENOSYS;
1539     }
1540 
1541     virq = kvm_irqchip_get_virq(s);
1542     if (virq < 0) {
1543         return virq;
1544     }
1545 
1546     kroute.gsi = virq;
1547     kroute.type = KVM_IRQ_ROUTING_S390_ADAPTER;
1548     kroute.flags = 0;
1549     kroute.u.adapter.summary_addr = adapter->summary_addr;
1550     kroute.u.adapter.ind_addr = adapter->ind_addr;
1551     kroute.u.adapter.summary_offset = adapter->summary_offset;
1552     kroute.u.adapter.ind_offset = adapter->ind_offset;
1553     kroute.u.adapter.adapter_id = adapter->adapter_id;
1554 
1555     kvm_add_routing_entry(s, &kroute);
1556 
1557     return virq;
1558 }
1559 
1560 int kvm_irqchip_add_hv_sint_route(KVMState *s, uint32_t vcpu, uint32_t sint)
1561 {
1562     struct kvm_irq_routing_entry kroute = {};
1563     int virq;
1564 
1565     if (!kvm_gsi_routing_enabled()) {
1566         return -ENOSYS;
1567     }
1568     if (!kvm_check_extension(s, KVM_CAP_HYPERV_SYNIC)) {
1569         return -ENOSYS;
1570     }
1571     virq = kvm_irqchip_get_virq(s);
1572     if (virq < 0) {
1573         return virq;
1574     }
1575 
1576     kroute.gsi = virq;
1577     kroute.type = KVM_IRQ_ROUTING_HV_SINT;
1578     kroute.flags = 0;
1579     kroute.u.hv_sint.vcpu = vcpu;
1580     kroute.u.hv_sint.sint = sint;
1581 
1582     kvm_add_routing_entry(s, &kroute);
1583     kvm_irqchip_commit_routes(s);
1584 
1585     return virq;
1586 }
1587 
1588 #else /* !KVM_CAP_IRQ_ROUTING */
1589 
1590 void kvm_init_irq_routing(KVMState *s)
1591 {
1592 }
1593 
1594 void kvm_irqchip_release_virq(KVMState *s, int virq)
1595 {
1596 }
1597 
1598 int kvm_irqchip_send_msi(KVMState *s, MSIMessage msg)
1599 {
1600     abort();
1601 }
1602 
1603 int kvm_irqchip_add_msi_route(KVMState *s, int vector, PCIDevice *dev)
1604 {
1605     return -ENOSYS;
1606 }
1607 
1608 int kvm_irqchip_add_adapter_route(KVMState *s, AdapterInfo *adapter)
1609 {
1610     return -ENOSYS;
1611 }
1612 
1613 int kvm_irqchip_add_hv_sint_route(KVMState *s, uint32_t vcpu, uint32_t sint)
1614 {
1615     return -ENOSYS;
1616 }
1617 
1618 static int kvm_irqchip_assign_irqfd(KVMState *s, int fd, int virq, bool assign)
1619 {
1620     abort();
1621 }
1622 
1623 int kvm_irqchip_update_msi_route(KVMState *s, int virq, MSIMessage msg)
1624 {
1625     return -ENOSYS;
1626 }
1627 #endif /* !KVM_CAP_IRQ_ROUTING */
1628 
1629 int kvm_irqchip_add_irqfd_notifier_gsi(KVMState *s, EventNotifier *n,
1630                                        EventNotifier *rn, int virq)
1631 {
1632     return kvm_irqchip_assign_irqfd(s, event_notifier_get_fd(n),
1633            rn ? event_notifier_get_fd(rn) : -1, virq, true);
1634 }
1635 
1636 int kvm_irqchip_remove_irqfd_notifier_gsi(KVMState *s, EventNotifier *n,
1637                                           int virq)
1638 {
1639     return kvm_irqchip_assign_irqfd(s, event_notifier_get_fd(n), -1, virq,
1640            false);
1641 }
1642 
1643 int kvm_irqchip_add_irqfd_notifier(KVMState *s, EventNotifier *n,
1644                                    EventNotifier *rn, qemu_irq irq)
1645 {
1646     gpointer key, gsi;
1647     gboolean found = g_hash_table_lookup_extended(s->gsimap, irq, &key, &gsi);
1648 
1649     if (!found) {
1650         return -ENXIO;
1651     }
1652     return kvm_irqchip_add_irqfd_notifier_gsi(s, n, rn, GPOINTER_TO_INT(gsi));
1653 }
1654 
1655 int kvm_irqchip_remove_irqfd_notifier(KVMState *s, EventNotifier *n,
1656                                       qemu_irq irq)
1657 {
1658     gpointer key, gsi;
1659     gboolean found = g_hash_table_lookup_extended(s->gsimap, irq, &key, &gsi);
1660 
1661     if (!found) {
1662         return -ENXIO;
1663     }
1664     return kvm_irqchip_remove_irqfd_notifier_gsi(s, n, GPOINTER_TO_INT(gsi));
1665 }
1666 
1667 void kvm_irqchip_set_qemuirq_gsi(KVMState *s, qemu_irq irq, int gsi)
1668 {
1669     g_hash_table_insert(s->gsimap, irq, GINT_TO_POINTER(gsi));
1670 }
1671 
1672 static void kvm_irqchip_create(MachineState *machine, KVMState *s)
1673 {
1674     int ret;
1675 
1676     if (kvm_check_extension(s, KVM_CAP_IRQCHIP)) {
1677         ;
1678     } else if (kvm_check_extension(s, KVM_CAP_S390_IRQCHIP)) {
1679         ret = kvm_vm_enable_cap(s, KVM_CAP_S390_IRQCHIP, 0);
1680         if (ret < 0) {
1681             fprintf(stderr, "Enable kernel irqchip failed: %s\n", strerror(-ret));
1682             exit(1);
1683         }
1684     } else {
1685         return;
1686     }
1687 
1688     /* First probe and see if there's an arch-specific hook to create the
1689      * in-kernel irqchip for us */
1690     ret = kvm_arch_irqchip_create(machine, s);
1691     if (ret == 0) {
1692         if (machine_kernel_irqchip_split(machine)) {
1693             fprintf(stderr, "Split IRQ chip mode not supported.\n");
1694             exit(1);
1695         } else {
1696             ret = kvm_vm_ioctl(s, KVM_CREATE_IRQCHIP);
1697         }
1698     }
1699     if (ret < 0) {
1700         fprintf(stderr, "Create kernel irqchip failed: %s\n", strerror(-ret));
1701         exit(1);
1702     }
1703 
1704     kvm_kernel_irqchip = true;
1705     /* If we have an in-kernel IRQ chip then we must have asynchronous
1706      * interrupt delivery (though the reverse is not necessarily true)
1707      */
1708     kvm_async_interrupts_allowed = true;
1709     kvm_halt_in_kernel_allowed = true;
1710 
1711     kvm_init_irq_routing(s);
1712 
1713     s->gsimap = g_hash_table_new(g_direct_hash, g_direct_equal);
1714 }
1715 
1716 /* Find the number of supported CPUs using the recommended
1717  * procedure from the kernel API documentation to cope with
1718  * older kernels that may be missing capabilities.
1719  */
1720 static int kvm_recommended_vcpus(KVMState *s)
1721 {
1722     int ret = kvm_vm_check_extension(s, KVM_CAP_NR_VCPUS);
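    /* A return of 0 means the capability is not implemented; the KVM API
     * documentation says to assume a limit of 4 in that case. */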
1723     return (ret) ? ret : 4;
1724 }
1725 
1726 static int kvm_max_vcpus(KVMState *s)
1727 {
1728     int ret = kvm_check_extension(s, KVM_CAP_MAX_VCPUS);
1729     return (ret) ? ret : kvm_recommended_vcpus(s);
1730 }
1731 
1732 static int kvm_max_vcpu_id(KVMState *s)
1733 {
1734     int ret = kvm_check_extension(s, KVM_CAP_MAX_VCPU_ID);
1735     return (ret) ? ret : kvm_max_vcpus(s);
1736 }
1737 
1738 bool kvm_vcpu_id_is_valid(int vcpu_id)
1739 {
1740     KVMState *s = KVM_STATE(current_machine->accelerator);
1741     return vcpu_id >= 0 && vcpu_id < kvm_max_vcpu_id(s);
1742 }
1743 
1744 static int kvm_init(MachineState *ms)
1745 {
1746     MachineClass *mc = MACHINE_GET_CLASS(ms);
1747     static const char upgrade_note[] =
1748         "Please upgrade to at least kernel 2.6.29 or recent kvm-kmod\n"
1749         "(see http://sourceforge.net/projects/kvm).\n";
1750     struct {
1751         const char *name;
1752         int num;
1753     } num_cpus[] = {
1754         { "SMP",          ms->smp.cpus },
1755         { "hotpluggable", ms->smp.max_cpus },
1756         { NULL, }
1757     }, *nc = num_cpus;
1758     int soft_vcpus_limit, hard_vcpus_limit;
1759     KVMState *s;
1760     const KVMCapabilityInfo *missing_cap;
1761     int ret;
1762     int type = 0;
1763     const char *kvm_type;
1764 
1765     s = KVM_STATE(ms->accelerator);
1766 
1767     /*
1768      * On systems where the kernel can support different base page
1769      * sizes, host page size may be different from TARGET_PAGE_SIZE,
1770      * even with KVM.  TARGET_PAGE_SIZE is assumed to be the minimum
1771      * page size for the system though.
1772      */
1773     assert(TARGET_PAGE_SIZE <= getpagesize());
1774 
1775     s->sigmask_len = 8;
1776 
1777 #ifdef KVM_CAP_SET_GUEST_DEBUG
1778     QTAILQ_INIT(&s->kvm_sw_breakpoints);
1779 #endif
1780     QLIST_INIT(&s->kvm_parked_vcpus);
1781     s->vmfd = -1;
1782     s->fd = qemu_open("/dev/kvm", O_RDWR);
1783     if (s->fd == -1) {
1784         fprintf(stderr, "Could not access KVM kernel module: %m\n");
1785         ret = -errno;
1786         goto err;
1787     }
1788 
1789     ret = kvm_ioctl(s, KVM_GET_API_VERSION, 0);
1790     if (ret < KVM_API_VERSION) {
1791         if (ret >= 0) {
1792             ret = -EINVAL;
1793         }
1794         fprintf(stderr, "kvm version too old\n");
1795         goto err;
1796     }
1797 
1798     if (ret > KVM_API_VERSION) {
1799         ret = -EINVAL;
1800         fprintf(stderr, "kvm version not supported\n");
1801         goto err;
1802     }
1803 
1804     kvm_immediate_exit = kvm_check_extension(s, KVM_CAP_IMMEDIATE_EXIT);
1805     s->nr_slots = kvm_check_extension(s, KVM_CAP_NR_MEMSLOTS);
1806 
1807     /* If unspecified, use the default value */
1808     if (!s->nr_slots) {
1809         s->nr_slots = 32;
1810     }
1811 
1812     kvm_type = qemu_opt_get(qemu_get_machine_opts(), "kvm-type");
1813     if (mc->kvm_type) {
1814         type = mc->kvm_type(ms, kvm_type);
1815     } else if (kvm_type) {
1816         ret = -EINVAL;
1817         fprintf(stderr, "Invalid argument kvm-type=%s\n", kvm_type);
1818         goto err;
1819     }
1820 
1821     do {
1822         ret = kvm_ioctl(s, KVM_CREATE_VM, type);
1823     } while (ret == -EINTR);
1824 
1825     if (ret < 0) {
1826         fprintf(stderr, "ioctl(KVM_CREATE_VM) failed: %d %s\n", -ret,
1827                 strerror(-ret));
1828 
1829 #ifdef TARGET_S390X
1830         if (ret == -EINVAL) {
1831             fprintf(stderr,
1832                     "Host kernel setup problem detected. Please verify:\n");
1833             fprintf(stderr, "- for kernels supporting the switch_amode or"
1834                     " user_mode parameters, whether\n");
1835             fprintf(stderr,
1836                     "  user space is running in primary address space\n");
1837             fprintf(stderr,
1838                     "- for kernels supporting the vm.allocate_pgste sysctl, "
1839                     "whether it is enabled\n");
1840         }
1841 #endif
1842         goto err;
1843     }
1844 
1845     s->vmfd = ret;
1846 
1847     /* check the vcpu limits */
1848     soft_vcpus_limit = kvm_recommended_vcpus(s);
1849     hard_vcpus_limit = kvm_max_vcpus(s);
1850 
1851     while (nc->name) {
1852         if (nc->num > soft_vcpus_limit) {
1853             warn_report("Number of %s cpus requested (%d) exceeds "
1854                         "the recommended cpus supported by KVM (%d)",
1855                         nc->name, nc->num, soft_vcpus_limit);
1856 
1857             if (nc->num > hard_vcpus_limit) {
1858                 fprintf(stderr, "Number of %s cpus requested (%d) exceeds "
1859                         "the maximum cpus supported by KVM (%d)\n",
1860                         nc->name, nc->num, hard_vcpus_limit);
1861                 exit(1);
1862             }
1863         }
1864         nc++;
1865     }
1866 
1867     missing_cap = kvm_check_extension_list(s, kvm_required_capabilites);
1868     if (!missing_cap) {
1869         missing_cap =
1870             kvm_check_extension_list(s, kvm_arch_required_capabilities);
1871     }
1872     if (missing_cap) {
1873         ret = -EINVAL;
1874         fprintf(stderr, "kvm does not support %s\n%s",
1875                 missing_cap->name, upgrade_note);
1876         goto err;
1877     }
1878 
1879     s->coalesced_mmio = kvm_check_extension(s, KVM_CAP_COALESCED_MMIO);
1880     s->coalesced_pio = s->coalesced_mmio &&
1881                        kvm_check_extension(s, KVM_CAP_COALESCED_PIO);
1882 
1883     s->manual_dirty_log_protect =
1884         kvm_check_extension(s, KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2);
1885     if (s->manual_dirty_log_protect) {
1886         ret = kvm_vm_enable_cap(s, KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2, 0, 1);
1887         if (ret) {
1888             warn_report("Enabling KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 failed: "
1889                         "falling back to the legacy dirty log mode");
1890             s->manual_dirty_log_protect = false;
1891         }
1892     }
1893 
1894 #ifdef KVM_CAP_VCPU_EVENTS
1895     s->vcpu_events = kvm_check_extension(s, KVM_CAP_VCPU_EVENTS);
1896 #endif
1897 
1898     s->robust_singlestep =
1899         kvm_check_extension(s, KVM_CAP_X86_ROBUST_SINGLESTEP);
1900 
1901 #ifdef KVM_CAP_DEBUGREGS
1902     s->debugregs = kvm_check_extension(s, KVM_CAP_DEBUGREGS);
1903 #endif
1904 
1905     s->max_nested_state_len = kvm_check_extension(s, KVM_CAP_NESTED_STATE);
1906 
1907 #ifdef KVM_CAP_IRQ_ROUTING
1908     kvm_direct_msi_allowed = (kvm_check_extension(s, KVM_CAP_SIGNAL_MSI) > 0);
1909 #endif
1910 
1911     s->intx_set_mask = kvm_check_extension(s, KVM_CAP_PCI_2_3);
1912 
1913     s->irq_set_ioctl = KVM_IRQ_LINE;
1914     if (kvm_check_extension(s, KVM_CAP_IRQ_INJECT_STATUS)) {
1915         s->irq_set_ioctl = KVM_IRQ_LINE_STATUS;
1916     }
1917 
1918     kvm_readonly_mem_allowed =
1919         (kvm_check_extension(s, KVM_CAP_READONLY_MEM) > 0);
1920 
1921     kvm_eventfds_allowed =
1922         (kvm_check_extension(s, KVM_CAP_IOEVENTFD) > 0);
1923 
1924     kvm_irqfds_allowed =
1925         (kvm_check_extension(s, KVM_CAP_IRQFD) > 0);
1926 
1927     kvm_resamplefds_allowed =
1928         (kvm_check_extension(s, KVM_CAP_IRQFD_RESAMPLE) > 0);
1929 
1930     kvm_vm_attributes_allowed =
1931         (kvm_check_extension(s, KVM_CAP_VM_ATTRIBUTES) > 0);
1932 
1933     kvm_ioeventfd_any_length_allowed =
1934         (kvm_check_extension(s, KVM_CAP_IOEVENTFD_ANY_LENGTH) > 0);
1935 
1936     kvm_state = s;
1937 
1938     /*
1939      * If a memory encryption object is specified, initialize the memory
1940      * encryption context.
1941      */
1942     if (ms->memory_encryption) {
1943         kvm_state->memcrypt_handle = sev_guest_init(ms->memory_encryption);
1944         if (!kvm_state->memcrypt_handle) {
1945             ret = -1;
1946             goto err;
1947         }
1948 
1949         kvm_state->memcrypt_encrypt_data = sev_encrypt_data;
1950     }
1951 
1952     ret = kvm_arch_init(ms, s);
1953     if (ret < 0) {
1954         goto err;
1955     }
1956 
1957     if (machine_kernel_irqchip_allowed(ms)) {
1958         kvm_irqchip_create(ms, s);
1959     }
1960 
1961     if (kvm_eventfds_allowed) {
1962         s->memory_listener.listener.eventfd_add = kvm_mem_ioeventfd_add;
1963         s->memory_listener.listener.eventfd_del = kvm_mem_ioeventfd_del;
1964     }
1965     s->memory_listener.listener.coalesced_io_add = kvm_coalesce_mmio_region;
1966     s->memory_listener.listener.coalesced_io_del = kvm_uncoalesce_mmio_region;
1967 
1968     kvm_memory_listener_register(s, &s->memory_listener,
1969                                  &address_space_memory, 0);
1970     memory_listener_register(&kvm_io_listener,
1971                              &address_space_io);
1972     memory_listener_register(&kvm_coalesced_pio_listener,
1973                              &address_space_io);
1974 
1975     s->many_ioeventfds = kvm_check_many_ioeventfds();
1976 
1977     s->sync_mmu = !!kvm_vm_check_extension(kvm_state, KVM_CAP_SYNC_MMU);
1978     if (!s->sync_mmu) {
1979         qemu_balloon_inhibit(true);
1980     }
1981 
1982     return 0;
1983 
1984 err:
1985     assert(ret < 0);
1986     if (s->vmfd >= 0) {
1987         close(s->vmfd);
1988     }
1989     if (s->fd != -1) {
1990         close(s->fd);
1991     }
1992     g_free(s->memory_listener.slots);
1993 
1994     return ret;
1995 }
1996 
1997 void kvm_set_sigmask_len(KVMState *s, unsigned int sigmask_len)
1998 {
1999     s->sigmask_len = sigmask_len;
2000 }
2001 
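/* Complete a KVM_EXIT_IO exit: replay 'count' accesses of 'size' bytes
 * against the I/O port address space, walking through the data area
 * shared with the kernel.
 */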
2002 static void kvm_handle_io(uint16_t port, MemTxAttrs attrs, void *data, int direction,
2003                           int size, uint32_t count)
2004 {
2005     int i;
2006     uint8_t *ptr = data;
2007 
2008     for (i = 0; i < count; i++) {
2009         address_space_rw(&address_space_io, port, attrs,
2010                          ptr, size,
2011                          direction == KVM_EXIT_IO_OUT);
2012         ptr += size;
2013     }
2014 }
2015 
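/* Report a KVM_EXIT_INTERNAL_ERROR.  An emulation failure keeps the guest
 * running (EXCP_INTERRUPT) unless the arch code asks to stop on it; any
 * other suberror returns -1 so that kvm_cpu_exec() stops the VM.
 */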
2016 static int kvm_handle_internal_error(CPUState *cpu, struct kvm_run *run)
2017 {
2018     fprintf(stderr, "KVM internal error. Suberror: %d\n",
2019             run->internal.suberror);
2020 
2021     if (kvm_check_extension(kvm_state, KVM_CAP_INTERNAL_ERROR_DATA)) {
2022         int i;
2023 
2024         for (i = 0; i < run->internal.ndata; ++i) {
2025             fprintf(stderr, "extra data[%d]: %"PRIx64"\n",
2026                     i, (uint64_t)run->internal.data[i]);
2027         }
2028     }
2029     if (run->internal.suberror == KVM_INTERNAL_ERROR_EMULATION) {
2030         fprintf(stderr, "emulation failure\n");
2031         if (!kvm_arch_stop_on_emulation_error(cpu)) {
2032             cpu_dump_state(cpu, stderr, CPU_DUMP_CODE);
2033             return EXCP_INTERRUPT;
2034         }
2035     }
2036     /* FIXME: Should trigger a QMP event to let management know
2037      * something went wrong.
2038      */
2039     return -1;
2040 }
2041 
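/* Drain the coalesced MMIO/PIO ring shared with the kernel, replaying
 * every buffered access.  coalesced_flush_in_progress prevents reentering
 * while the replayed writes are themselves being handled.
 */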
2042 void kvm_flush_coalesced_mmio_buffer(void)
2043 {
2044     KVMState *s = kvm_state;
2045 
2046     if (s->coalesced_flush_in_progress) {
2047         return;
2048     }
2049 
2050     s->coalesced_flush_in_progress = true;
2051 
2052     if (s->coalesced_mmio_ring) {
2053         struct kvm_coalesced_mmio_ring *ring = s->coalesced_mmio_ring;
2054         while (ring->first != ring->last) {
2055             struct kvm_coalesced_mmio *ent;
2056 
2057             ent = &ring->coalesced_mmio[ring->first];
2058 
2059             if (ent->pio == 1) {
2060                 address_space_rw(&address_space_io, ent->phys_addr,
2061                                  MEMTXATTRS_UNSPECIFIED, ent->data,
2062                                  ent->len, true);
2063             } else {
2064                 cpu_physical_memory_write(ent->phys_addr, ent->data, ent->len);
2065             }
2066             smp_wmb();
2067             ring->first = (ring->first + 1) % KVM_COALESCED_MMIO_MAX;
2068         }
2069     }
2070 
2071     s->coalesced_flush_in_progress = false;
2072 }
2073 
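/* Register synchronization helpers: guest state is fetched from the
 * kernel on demand and marked dirty so that QEMU's copy is written back
 * before the next KVM_RUN; the post-reset, post-init and pre-loadvm
 * variants control how and when that write-back happens.
 */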
2074 static void do_kvm_cpu_synchronize_state(CPUState *cpu, run_on_cpu_data arg)
2075 {
2076     if (!cpu->vcpu_dirty) {
2077         kvm_arch_get_registers(cpu);
2078         cpu->vcpu_dirty = true;
2079     }
2080 }
2081 
2082 void kvm_cpu_synchronize_state(CPUState *cpu)
2083 {
2084     if (!cpu->vcpu_dirty) {
2085         run_on_cpu(cpu, do_kvm_cpu_synchronize_state, RUN_ON_CPU_NULL);
2086     }
2087 }
2088 
2089 static void do_kvm_cpu_synchronize_post_reset(CPUState *cpu, run_on_cpu_data arg)
2090 {
2091     kvm_arch_put_registers(cpu, KVM_PUT_RESET_STATE);
2092     cpu->vcpu_dirty = false;
2093 }
2094 
2095 void kvm_cpu_synchronize_post_reset(CPUState *cpu)
2096 {
2097     run_on_cpu(cpu, do_kvm_cpu_synchronize_post_reset, RUN_ON_CPU_NULL);
2098 }
2099 
2100 static void do_kvm_cpu_synchronize_post_init(CPUState *cpu, run_on_cpu_data arg)
2101 {
2102     kvm_arch_put_registers(cpu, KVM_PUT_FULL_STATE);
2103     cpu->vcpu_dirty = false;
2104 }
2105 
2106 void kvm_cpu_synchronize_post_init(CPUState *cpu)
2107 {
2108     run_on_cpu(cpu, do_kvm_cpu_synchronize_post_init, RUN_ON_CPU_NULL);
2109 }
2110 
2111 static void do_kvm_cpu_synchronize_pre_loadvm(CPUState *cpu, run_on_cpu_data arg)
2112 {
2113     cpu->vcpu_dirty = true;
2114 }
2115 
2116 void kvm_cpu_synchronize_pre_loadvm(CPUState *cpu)
2117 {
2118     run_on_cpu(cpu, do_kvm_cpu_synchronize_pre_loadvm, RUN_ON_CPU_NULL);
2119 }
2120 
2121 #ifdef KVM_HAVE_MCE_INJECTION
2122 static __thread void *pending_sigbus_addr;
2123 static __thread int pending_sigbus_code;
2124 static __thread bool have_sigbus_pending;
2125 #endif
2126 
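/* Kick a vCPU out of KVM_RUN, either via the kvm_run->immediate_exit flag
 * (KVM_CAP_IMMEDIATE_EXIT) or, on older kernels, by sending SIG_IPI to
 * the vCPU thread itself.
 */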
2127 static void kvm_cpu_kick(CPUState *cpu)
2128 {
2129     atomic_set(&cpu->kvm_run->immediate_exit, 1);
2130 }
2131 
2132 static void kvm_cpu_kick_self(void)
2133 {
2134     if (kvm_immediate_exit) {
2135         kvm_cpu_kick(current_cpu);
2136     } else {
2137         qemu_cpu_kick_self();
2138     }
2139 }
2140 
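/* With the legacy SIG_IPI kick, consume any pending IPI with sigtimedwait
 * so it is not left pending for the next KVM_RUN.  With
 * KVM_CAP_IMMEDIATE_EXIT there is nothing to consume; just clear the
 * immediate_exit flag again.
 */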
2141 static void kvm_eat_signals(CPUState *cpu)
2142 {
2143     struct timespec ts = { 0, 0 };
2144     siginfo_t siginfo;
2145     sigset_t waitset;
2146     sigset_t chkset;
2147     int r;
2148 
2149     if (kvm_immediate_exit) {
2150         atomic_set(&cpu->kvm_run->immediate_exit, 0);
2151         /* Write kvm_run->immediate_exit before the cpu->exit_request
2152          * write in kvm_cpu_exec.
2153          */
2154         smp_wmb();
2155         return;
2156     }
2157 
2158     sigemptyset(&waitset);
2159     sigaddset(&waitset, SIG_IPI);
2160 
2161     do {
2162         r = sigtimedwait(&waitset, &siginfo, &ts);
2163         if (r == -1 && !(errno == EAGAIN || errno == EINTR)) {
2164             perror("sigtimedwait");
2165             exit(1);
2166         }
2167 
2168         r = sigpending(&chkset);
2169         if (r == -1) {
2170             perror("sigpending");
2171             exit(1);
2172         }
2173     } while (sigismember(&chkset, SIG_IPI));
2174 }
2175 
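/* Per-vCPU execution loop: push any dirty register state into the kernel,
 * enter the guest with KVM_RUN outside the BQL, and dispatch the exit
 * reasons until an error or an exception condition is returned.
 */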
2176 int kvm_cpu_exec(CPUState *cpu)
2177 {
2178     struct kvm_run *run = cpu->kvm_run;
2179     int ret, run_ret;
2180 
2181     DPRINTF("kvm_cpu_exec()\n");
2182 
2183     if (kvm_arch_process_async_events(cpu)) {
2184         atomic_set(&cpu->exit_request, 0);
2185         return EXCP_HLT;
2186     }
2187 
2188     qemu_mutex_unlock_iothread();
2189     cpu_exec_start(cpu);
2190 
2191     do {
2192         MemTxAttrs attrs;
2193 
2194         if (cpu->vcpu_dirty) {
2195             kvm_arch_put_registers(cpu, KVM_PUT_RUNTIME_STATE);
2196             cpu->vcpu_dirty = false;
2197         }
2198 
2199         kvm_arch_pre_run(cpu, run);
2200         if (atomic_read(&cpu->exit_request)) {
2201             DPRINTF("interrupt exit requested\n");
2202             /*
2203              * KVM requires us to reenter the kernel after IO exits to complete
2204              * instruction emulation. This self-signal will ensure that we
2205              * leave ASAP again.
2206              */
2207             kvm_cpu_kick_self();
2208         }
2209 
2210         /* Read cpu->exit_request before KVM_RUN reads run->immediate_exit.
2211          * Matching barrier in kvm_eat_signals.
2212          */
2213         smp_rmb();
2214 
2215         run_ret = kvm_vcpu_ioctl(cpu, KVM_RUN, 0);
2216 
2217         attrs = kvm_arch_post_run(cpu, run);
2218 
2219 #ifdef KVM_HAVE_MCE_INJECTION
2220         if (unlikely(have_sigbus_pending)) {
2221             qemu_mutex_lock_iothread();
2222             kvm_arch_on_sigbus_vcpu(cpu, pending_sigbus_code,
2223                                     pending_sigbus_addr);
2224             have_sigbus_pending = false;
2225             qemu_mutex_unlock_iothread();
2226         }
2227 #endif
2228 
2229         if (run_ret < 0) {
2230             if (run_ret == -EINTR || run_ret == -EAGAIN) {
2231                 DPRINTF("io window exit\n");
2232                 kvm_eat_signals(cpu);
2233                 ret = EXCP_INTERRUPT;
2234                 break;
2235             }
2236             fprintf(stderr, "error: kvm run failed %s\n",
2237                     strerror(-run_ret));
2238 #ifdef TARGET_PPC
2239             if (run_ret == -EBUSY) {
2240                 fprintf(stderr,
2241                         "This is probably because your SMT is enabled.\n"
2242                         "VCPU can only run on primary threads with all "
2243                         "secondary threads offline.\n");
2244             }
2245 #endif
2246             ret = -1;
2247             break;
2248         }
2249 
2250         trace_kvm_run_exit(cpu->cpu_index, run->exit_reason);
2251         switch (run->exit_reason) {
2252         case KVM_EXIT_IO:
2253             DPRINTF("handle_io\n");
2254             /* Called outside BQL */
2255             kvm_handle_io(run->io.port, attrs,
2256                           (uint8_t *)run + run->io.data_offset,
2257                           run->io.direction,
2258                           run->io.size,
2259                           run->io.count);
2260             ret = 0;
2261             break;
2262         case KVM_EXIT_MMIO:
2263             DPRINTF("handle_mmio\n");
2264             /* Called outside BQL */
2265             address_space_rw(&address_space_memory,
2266                              run->mmio.phys_addr, attrs,
2267                              run->mmio.data,
2268                              run->mmio.len,
2269                              run->mmio.is_write);
2270             ret = 0;
2271             break;
2272         case KVM_EXIT_IRQ_WINDOW_OPEN:
2273             DPRINTF("irq_window_open\n");
2274             ret = EXCP_INTERRUPT;
2275             break;
2276         case KVM_EXIT_SHUTDOWN:
2277             DPRINTF("shutdown\n");
2278             qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
2279             ret = EXCP_INTERRUPT;
2280             break;
2281         case KVM_EXIT_UNKNOWN:
2282             fprintf(stderr, "KVM: unknown exit, hardware reason %" PRIx64 "\n",
2283                     (uint64_t)run->hw.hardware_exit_reason);
2284             ret = -1;
2285             break;
2286         case KVM_EXIT_INTERNAL_ERROR:
2287             ret = kvm_handle_internal_error(cpu, run);
2288             break;
2289         case KVM_EXIT_SYSTEM_EVENT:
2290             switch (run->system_event.type) {
2291             case KVM_SYSTEM_EVENT_SHUTDOWN:
2292                 qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN);
2293                 ret = EXCP_INTERRUPT;
2294                 break;
2295             case KVM_SYSTEM_EVENT_RESET:
2296                 qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
2297                 ret = EXCP_INTERRUPT;
2298                 break;
2299             case KVM_SYSTEM_EVENT_CRASH:
2300                 kvm_cpu_synchronize_state(cpu);
2301                 qemu_mutex_lock_iothread();
2302                 qemu_system_guest_panicked(cpu_get_crash_info(cpu));
2303                 qemu_mutex_unlock_iothread();
2304                 ret = 0;
2305                 break;
2306             default:
2307                 DPRINTF("kvm_arch_handle_exit\n");
2308                 ret = kvm_arch_handle_exit(cpu, run);
2309                 break;
2310             }
2311             break;
2312         default:
2313             DPRINTF("kvm_arch_handle_exit\n");
2314             ret = kvm_arch_handle_exit(cpu, run);
2315             break;
2316         }
2317     } while (ret == 0);
2318 
2319     cpu_exec_end(cpu);
2320     qemu_mutex_lock_iothread();
2321 
2322     if (ret < 0) {
2323         cpu_dump_state(cpu, stderr, CPU_DUMP_CODE);
2324         vm_stop(RUN_STATE_INTERNAL_ERROR);
2325     }
2326 
2327     atomic_set(&cpu->exit_request, 0);
2328     return ret;
2329 }
2330 
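/* Thin vararg wrappers around ioctl(2) on the /dev/kvm, VM, vCPU and
 * device file descriptors: they trace the request and convert the
 * -1/errno failure convention into a negative errno return value.
 */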
2331 int kvm_ioctl(KVMState *s, int type, ...)
2332 {
2333     int ret;
2334     void *arg;
2335     va_list ap;
2336 
2337     va_start(ap, type);
2338     arg = va_arg(ap, void *);
2339     va_end(ap);
2340 
2341     trace_kvm_ioctl(type, arg);
2342     ret = ioctl(s->fd, type, arg);
2343     if (ret == -1) {
2344         ret = -errno;
2345     }
2346     return ret;
2347 }
2348 
2349 int kvm_vm_ioctl(KVMState *s, int type, ...)
2350 {
2351     int ret;
2352     void *arg;
2353     va_list ap;
2354 
2355     va_start(ap, type);
2356     arg = va_arg(ap, void *);
2357     va_end(ap);
2358 
2359     trace_kvm_vm_ioctl(type, arg);
2360     ret = ioctl(s->vmfd, type, arg);
2361     if (ret == -1) {
2362         ret = -errno;
2363     }
2364     return ret;
2365 }
2366 
2367 int kvm_vcpu_ioctl(CPUState *cpu, int type, ...)
2368 {
2369     int ret;
2370     void *arg;
2371     va_list ap;
2372 
2373     va_start(ap, type);
2374     arg = va_arg(ap, void *);
2375     va_end(ap);
2376 
2377     trace_kvm_vcpu_ioctl(cpu->cpu_index, type, arg);
2378     ret = ioctl(cpu->kvm_fd, type, arg);
2379     if (ret == -1) {
2380         ret = -errno;
2381     }
2382     return ret;
2383 }
2384 
2385 int kvm_device_ioctl(int fd, int type, ...)
2386 {
2387     int ret;
2388     void *arg;
2389     va_list ap;
2390 
2391     va_start(ap, type);
2392     arg = va_arg(ap, void *);
2393     va_end(ap);
2394 
2395     trace_kvm_device_ioctl(fd, type, arg);
2396     ret = ioctl(fd, type, arg);
2397     if (ret == -1) {
2398         ret = -errno;
2399     }
2400     return ret;
2401 }
2402 
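/* KVM_HAS_DEVICE_ATTR probes for the VM and for a device fd: return 1 if
 * the group/attr pair is supported, 0 otherwise.
 */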
2403 int kvm_vm_check_attr(KVMState *s, uint32_t group, uint64_t attr)
2404 {
2405     int ret;
2406     struct kvm_device_attr attribute = {
2407         .group = group,
2408         .attr = attr,
2409     };
2410 
2411     if (!kvm_vm_attributes_allowed) {
2412         return 0;
2413     }
2414 
2415     ret = kvm_vm_ioctl(s, KVM_HAS_DEVICE_ATTR, &attribute);
2416     /* kvm returns 0 on success for HAS_DEVICE_ATTR */
2417     return ret ? 0 : 1;
2418 }
2419 
2420 int kvm_device_check_attr(int dev_fd, uint32_t group, uint64_t attr)
2421 {
2422     struct kvm_device_attr attribute = {
2423         .group = group,
2424         .attr = attr,
2425         .flags = 0,
2426     };
2427 
2428     return kvm_device_ioctl(dev_fd, KVM_HAS_DEVICE_ATTR, &attribute) ? 0 : 1;
2429 }
2430 
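/* Read or write a single device attribute, reporting failures through
 * 'errp'.  A minimal (hypothetical) usage sketch:
 *
 *     uint64_t val;
 *     kvm_device_access(dev_fd, SOME_GROUP, SOME_ATTR, &val, false,
 *                       &error_abort);
 */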
2431 int kvm_device_access(int fd, int group, uint64_t attr,
2432                       void *val, bool write, Error **errp)
2433 {
2434     struct kvm_device_attr kvmattr;
2435     int err;
2436 
2437     kvmattr.flags = 0;
2438     kvmattr.group = group;
2439     kvmattr.attr = attr;
2440     kvmattr.addr = (uintptr_t)val;
2441 
2442     err = kvm_device_ioctl(fd,
2443                            write ? KVM_SET_DEVICE_ATTR : KVM_GET_DEVICE_ATTR,
2444                            &kvmattr);
2445     if (err < 0) {
2446         error_setg_errno(errp, -err,
2447                          "KVM_%s_DEVICE_ATTR failed: Group %d "
2448                          "attr 0x%016" PRIx64,
2449                          write ? "SET" : "GET", group, attr);
2450     }
2451     return err;
2452 }
2453 
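/* Accessors for capability information, mostly cached in KVMState when
 * kvm_init() probed the host kernel.
 */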
2454 bool kvm_has_sync_mmu(void)
2455 {
2456     return kvm_state->sync_mmu;
2457 }
2458 
2459 int kvm_has_vcpu_events(void)
2460 {
2461     return kvm_state->vcpu_events;
2462 }
2463 
2464 int kvm_has_robust_singlestep(void)
2465 {
2466     return kvm_state->robust_singlestep;
2467 }
2468 
2469 int kvm_has_debugregs(void)
2470 {
2471     return kvm_state->debugregs;
2472 }
2473 
2474 int kvm_max_nested_state_length(void)
2475 {
2476     return kvm_state->max_nested_state_len;
2477 }
2478 
2479 int kvm_has_many_ioeventfds(void)
2480 {
2481     if (!kvm_enabled()) {
2482         return 0;
2483     }
2484     return kvm_state->many_ioeventfds;
2485 }
2486 
2487 int kvm_has_gsi_routing(void)
2488 {
2489 #ifdef KVM_CAP_IRQ_ROUTING
2490     return kvm_check_extension(kvm_state, KVM_CAP_IRQ_ROUTING);
2491 #else
2492     return false;
2493 #endif
2494 }
2495 
2496 int kvm_has_intx_set_mask(void)
2497 {
2498     return kvm_state->intx_set_mask;
2499 }
2500 
2501 bool kvm_arm_supports_user_irq(void)
2502 {
2503     return kvm_check_extension(kvm_state, KVM_CAP_ARM_USER_IRQ);
2504 }
2505 
2506 #ifdef KVM_CAP_SET_GUEST_DEBUG
2507 struct kvm_sw_breakpoint *kvm_find_sw_breakpoint(CPUState *cpu,
2508                                                  target_ulong pc)
2509 {
2510     struct kvm_sw_breakpoint *bp;
2511 
2512     QTAILQ_FOREACH(bp, &cpu->kvm_state->kvm_sw_breakpoints, entry) {
2513         if (bp->pc == pc) {
2514             return bp;
2515         }
2516     }
2517     return NULL;
2518 }
2519 
2520 int kvm_sw_breakpoints_active(CPUState *cpu)
2521 {
2522     return !QTAILQ_EMPTY(&cpu->kvm_state->kvm_sw_breakpoints);
2523 }
2524 
2525 struct kvm_set_guest_debug_data {
2526     struct kvm_guest_debug dbg;
2527     int err;
2528 };
2529 
2530 static void kvm_invoke_set_guest_debug(CPUState *cpu, run_on_cpu_data data)
2531 {
2532     struct kvm_set_guest_debug_data *dbg_data =
2533         (struct kvm_set_guest_debug_data *) data.host_ptr;
2534 
2535     dbg_data->err = kvm_vcpu_ioctl(cpu, KVM_SET_GUEST_DEBUG,
2536                                    &dbg_data->dbg);
2537 }
2538 
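/* Build the kvm_guest_debug control flags (trap reinjection, single-step
 * and the arch breakpoint state) and apply them with KVM_SET_GUEST_DEBUG
 * on the vCPU's own thread.
 */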
2539 int kvm_update_guest_debug(CPUState *cpu, unsigned long reinject_trap)
2540 {
2541     struct kvm_set_guest_debug_data data;
2542 
2543     data.dbg.control = reinject_trap;
2544 
2545     if (cpu->singlestep_enabled) {
2546         data.dbg.control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP;
2547     }
2548     kvm_arch_update_guest_debug(cpu, &data.dbg);
2549 
2550     run_on_cpu(cpu, kvm_invoke_set_guest_debug,
2551                RUN_ON_CPU_HOST_PTR(&data));
2552     return data.err;
2553 }
2554 
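/* GDB breakpoint insertion/removal: software breakpoints are
 * reference-counted in kvm_sw_breakpoints and patched in or out by the
 * arch code, hardware breakpoints go straight to the arch hooks; in both
 * cases the updated debug state is then pushed to every vCPU.
 */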
2555 int kvm_insert_breakpoint(CPUState *cpu, target_ulong addr,
2556                           target_ulong len, int type)
2557 {
2558     struct kvm_sw_breakpoint *bp;
2559     int err;
2560 
2561     if (type == GDB_BREAKPOINT_SW) {
2562         bp = kvm_find_sw_breakpoint(cpu, addr);
2563         if (bp) {
2564             bp->use_count++;
2565             return 0;
2566         }
2567 
2568         bp = g_malloc(sizeof(struct kvm_sw_breakpoint));
2569         bp->pc = addr;
2570         bp->use_count = 1;
2571         err = kvm_arch_insert_sw_breakpoint(cpu, bp);
2572         if (err) {
2573             g_free(bp);
2574             return err;
2575         }
2576 
2577         QTAILQ_INSERT_HEAD(&cpu->kvm_state->kvm_sw_breakpoints, bp, entry);
2578     } else {
2579         err = kvm_arch_insert_hw_breakpoint(addr, len, type);
2580         if (err) {
2581             return err;
2582         }
2583     }
2584 
2585     CPU_FOREACH(cpu) {
2586         err = kvm_update_guest_debug(cpu, 0);
2587         if (err) {
2588             return err;
2589         }
2590     }
2591     return 0;
2592 }
2593 
2594 int kvm_remove_breakpoint(CPUState *cpu, target_ulong addr,
2595                           target_ulong len, int type)
2596 {
2597     struct kvm_sw_breakpoint *bp;
2598     int err;
2599 
2600     if (type == GDB_BREAKPOINT_SW) {
2601         bp = kvm_find_sw_breakpoint(cpu, addr);
2602         if (!bp) {
2603             return -ENOENT;
2604         }
2605 
2606         if (bp->use_count > 1) {
2607             bp->use_count--;
2608             return 0;
2609         }
2610 
2611         err = kvm_arch_remove_sw_breakpoint(cpu, bp);
2612         if (err) {
2613             return err;
2614         }
2615 
2616         QTAILQ_REMOVE(&cpu->kvm_state->kvm_sw_breakpoints, bp, entry);
2617         g_free(bp);
2618     } else {
2619         err = kvm_arch_remove_hw_breakpoint(addr, len, type);
2620         if (err) {
2621             return err;
2622         }
2623     }
2624 
2625     CPU_FOREACH(cpu) {
2626         err = kvm_update_guest_debug(cpu, 0);
2627         if (err) {
2628             return err;
2629         }
2630     }
2631     return 0;
2632 }
2633 
2634 void kvm_remove_all_breakpoints(CPUState *cpu)
2635 {
2636     struct kvm_sw_breakpoint *bp, *next;
2637     KVMState *s = cpu->kvm_state;
2638     CPUState *tmpcpu;
2639 
2640     QTAILQ_FOREACH_SAFE(bp, &s->kvm_sw_breakpoints, entry, next) {
2641         if (kvm_arch_remove_sw_breakpoint(cpu, bp) != 0) {
2642             /* Try harder to find a CPU that currently sees the breakpoint. */
2643             CPU_FOREACH(tmpcpu) {
2644                 if (kvm_arch_remove_sw_breakpoint(tmpcpu, bp) == 0) {
2645                     break;
2646                 }
2647             }
2648         }
2649         QTAILQ_REMOVE(&s->kvm_sw_breakpoints, bp, entry);
2650         g_free(bp);
2651     }
2652     kvm_arch_remove_all_hw_breakpoints();
2653 
2654     CPU_FOREACH(cpu) {
2655         kvm_update_guest_debug(cpu, 0);
2656     }
2657 }
2658 
2659 #else /* !KVM_CAP_SET_GUEST_DEBUG */
2660 
2661 int kvm_update_guest_debug(CPUState *cpu, unsigned long reinject_trap)
2662 {
2663     return -EINVAL;
2664 }
2665 
2666 int kvm_insert_breakpoint(CPUState *cpu, target_ulong addr,
2667                           target_ulong len, int type)
2668 {
2669     return -EINVAL;
2670 }
2671 
2672 int kvm_remove_breakpoint(CPUState *cpu, target_ulong addr,
2673                           target_ulong len, int type)
2674 {
2675     return -EINVAL;
2676 }
2677 
2678 void kvm_remove_all_breakpoints(CPUState *cpu)
2679 {
2680 }
2681 #endif /* !KVM_CAP_SET_GUEST_DEBUG */
2682 
2683 static int kvm_set_signal_mask(CPUState *cpu, const sigset_t *sigset)
2684 {
2685     KVMState *s = kvm_state;
2686     struct kvm_signal_mask *sigmask;
2687     int r;
2688 
2689     sigmask = g_malloc(sizeof(*sigmask) + sizeof(*sigset));
2690 
2691     sigmask->len = s->sigmask_len;
2692     memcpy(sigmask->sigset, sigset, sizeof(*sigset));
2693     r = kvm_vcpu_ioctl(cpu, KVM_SET_SIGNAL_MASK, sigmask);
2694     g_free(sigmask);
2695 
2696     return r;
2697 }
2698 
2699 static void kvm_ipi_signal(int sig)
2700 {
2701     if (current_cpu) {
2702         assert(kvm_immediate_exit);
2703         kvm_cpu_kick(current_cpu);
2704     }
2705 }
2706 
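/* Set up the signal state of a vCPU thread: install the SIG_IPI handler
 * and adjust the thread's signal mask, either directly (with
 * KVM_CAP_IMMEDIATE_EXIT) or via KVM_SET_SIGNAL_MASK so that SIG_IPI is
 * only unblocked while the vCPU sits in KVM_RUN.
 */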
2707 void kvm_init_cpu_signals(CPUState *cpu)
2708 {
2709     int r;
2710     sigset_t set;
2711     struct sigaction sigact;
2712 
2713     memset(&sigact, 0, sizeof(sigact));
2714     sigact.sa_handler = kvm_ipi_signal;
2715     sigaction(SIG_IPI, &sigact, NULL);
2716 
2717     pthread_sigmask(SIG_BLOCK, NULL, &set);
2718 #if defined KVM_HAVE_MCE_INJECTION
2719     sigdelset(&set, SIGBUS);
2720     pthread_sigmask(SIG_SETMASK, &set, NULL);
2721 #endif
2722     sigdelset(&set, SIG_IPI);
2723     if (kvm_immediate_exit) {
2724         r = pthread_sigmask(SIG_SETMASK, &set, NULL);
2725     } else {
2726         r = kvm_set_signal_mask(cpu, &set);
2727     }
2728     if (r) {
2729         fprintf(stderr, "kvm_set_signal_mask: %s\n", strerror(-r));
2730         exit(1);
2731     }
2732 }
2733 
2734 /* Called asynchronously in VCPU thread.  */
2735 int kvm_on_sigbus_vcpu(CPUState *cpu, int code, void *addr)
2736 {
2737 #ifdef KVM_HAVE_MCE_INJECTION
2738     if (have_sigbus_pending) {
2739         return 1;
2740     }
2741     have_sigbus_pending = true;
2742     pending_sigbus_addr = addr;
2743     pending_sigbus_code = code;
2744     atomic_set(&cpu->exit_request, 1);
2745     return 0;
2746 #else
2747     return 1;
2748 #endif
2749 }
2750 
2751 /* Called synchronously (via signalfd) in main thread.  */
2752 int kvm_on_sigbus(int code, void *addr)
2753 {
2754 #ifdef KVM_HAVE_MCE_INJECTION
2755     /* An action-required MCE kills the process if SIGBUS is blocked, which
2756      * is what happens in the I/O thread, where we handle MCEs via signalfd.
2757      * Hence only action-optional MCEs can reach this point.
2758      */
2759     assert(code != BUS_MCEERR_AR);
2760     kvm_arch_on_sigbus_vcpu(first_cpu, code, addr);
2761     return 0;
2762 #else
2763     return 1;
2764 #endif
2765 }
2766 
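/* Create an in-kernel device of the given type and return its fd.  With
 * 'test' set, only KVM_CREATE_DEVICE_TEST is issued, so the call reports
 * whether the type is supported (returning 0) without creating anything.
 */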
2767 int kvm_create_device(KVMState *s, uint64_t type, bool test)
2768 {
2769     int ret;
2770     struct kvm_create_device create_dev;
2771 
2772     create_dev.type = type;
2773     create_dev.fd = -1;
2774     create_dev.flags = test ? KVM_CREATE_DEVICE_TEST : 0;
2775 
2776     if (!kvm_check_extension(s, KVM_CAP_DEVICE_CTRL)) {
2777         return -ENOTSUP;
2778     }
2779 
2780     ret = kvm_vm_ioctl(s, KVM_CREATE_DEVICE, &create_dev);
2781     if (ret) {
2782         return ret;
2783     }
2784 
2785     return test ? 0 : create_dev.fd;
2786 }
2787 
2788 bool kvm_device_supported(int vmfd, uint64_t type)
2789 {
2790     struct kvm_create_device create_dev = {
2791         .type = type,
2792         .fd = -1,
2793         .flags = KVM_CREATE_DEVICE_TEST,
2794     };
2795 
2796     if (ioctl(vmfd, KVM_CHECK_EXTENSION, KVM_CAP_DEVICE_CTRL) <= 0) {
2797         return false;
2798     }
2799 
2800     return (ioctl(vmfd, KVM_CREATE_DEVICE, &create_dev) >= 0);
2801 }
2802 
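/* KVM_SET_ONE_REG / KVM_GET_ONE_REG wrappers: transfer a single register,
 * identified by its 64-bit 'id', between QEMU memory and the vCPU,
 * tracing any failure.
 */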
2803 int kvm_set_one_reg(CPUState *cs, uint64_t id, void *source)
2804 {
2805     struct kvm_one_reg reg;
2806     int r;
2807 
2808     reg.id = id;
2809     reg.addr = (uintptr_t) source;
2810     r = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
2811     if (r) {
2812         trace_kvm_failed_reg_set(id, strerror(-r));
2813     }
2814     return r;
2815 }
2816 
2817 int kvm_get_one_reg(CPUState *cs, uint64_t id, void *target)
2818 {
2819     struct kvm_one_reg reg;
2820     int r;
2821 
2822     reg.id = id;
2823     reg.addr = (uintptr_t) target;
2824     r = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
2825     if (r) {
2826         trace_kvm_failed_reg_get(id, strerror(-r));
2827     }
2828     return r;
2829 }
2830 
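/* QOM boilerplate: register the "KVM" accelerator class so that selecting
 * the KVM accelerator makes kvm_init() the machine initialization hook.
 */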
2831 static void kvm_accel_class_init(ObjectClass *oc, void *data)
2832 {
2833     AccelClass *ac = ACCEL_CLASS(oc);
2834     ac->name = "KVM";
2835     ac->init_machine = kvm_init;
2836     ac->allowed = &kvm_allowed;
2837 }
2838 
2839 static const TypeInfo kvm_accel_type = {
2840     .name = TYPE_KVM_ACCEL,
2841     .parent = TYPE_ACCEL,
2842     .class_init = kvm_accel_class_init,
2843     .instance_size = sizeof(KVMState),
2844 };
2845 
2846 static void kvm_type_init(void)
2847 {
2848     type_register_static(&kvm_accel_type);
2849 }
2850 
2851 type_init(kvm_type_init);
2852