xref: /openbmc/qemu/accel/kvm/kvm-all.c (revision 4376c40d)
1 /*
2  * QEMU KVM support
3  *
4  * Copyright IBM, Corp. 2008
5  *           Red Hat, Inc. 2008
6  *
7  * Authors:
8  *  Anthony Liguori   <aliguori@us.ibm.com>
9  *  Glauber Costa     <gcosta@redhat.com>
10  *
11  * This work is licensed under the terms of the GNU GPL, version 2 or later.
12  * See the COPYING file in the top-level directory.
13  *
14  */
15 
16 #include "qemu/osdep.h"
17 #include <sys/ioctl.h>
18 
19 #include <linux/kvm.h>
20 
21 #include "qemu/atomic.h"
22 #include "qemu/option.h"
23 #include "qemu/config-file.h"
24 #include "qemu/error-report.h"
25 #include "qapi/error.h"
26 #include "hw/pci/msi.h"
27 #include "hw/pci/msix.h"
28 #include "hw/s390x/adapter.h"
29 #include "exec/gdbstub.h"
30 #include "sysemu/kvm_int.h"
31 #include "sysemu/runstate.h"
32 #include "sysemu/cpus.h"
33 #include "sysemu/sysemu.h"
34 #include "qemu/bswap.h"
35 #include "exec/memory.h"
36 #include "exec/ram_addr.h"
37 #include "exec/address-spaces.h"
38 #include "qemu/event_notifier.h"
39 #include "qemu/main-loop.h"
40 #include "trace.h"
41 #include "hw/irq.h"
42 #include "sysemu/sev.h"
43 #include "sysemu/balloon.h"
44 #include "qapi/visitor.h"
45 
46 #include "hw/boards.h"
47 
48 /* This check must be after config-host.h is included */
49 #ifdef CONFIG_EVENTFD
50 #include <sys/eventfd.h>
51 #endif
52 
53 /* KVM uses PAGE_SIZE in its definition of KVM_COALESCED_MMIO_MAX. We
54  * need to use the real host PAGE_SIZE, as that's what KVM will use.
55  */
56 #define PAGE_SIZE qemu_real_host_page_size
57 
58 //#define DEBUG_KVM
59 
60 #ifdef DEBUG_KVM
61 #define DPRINTF(fmt, ...) \
62     do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
63 #else
64 #define DPRINTF(fmt, ...) \
65     do { } while (0)
66 #endif
67 
68 #define KVM_MSI_HASHTAB_SIZE    256
69 
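/*
 * KVM has no interface to destroy a vCPU once it has been created, so when a
 * CPU is unplugged its kvm fd is "parked" on the list below and reused if a
 * vCPU with the same id is created again later; see kvm_destroy_vcpu() and
 * kvm_get_vcpu().
 */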
70 struct KVMParkedVcpu {
71     unsigned long vcpu_id;
72     int kvm_fd;
73     QLIST_ENTRY(KVMParkedVcpu) node;
74 };
75 
76 struct KVMState
77 {
78     AccelState parent_obj;
79 
80     int nr_slots;
81     int fd;
82     int vmfd;
83     int coalesced_mmio;
84     int coalesced_pio;
85     struct kvm_coalesced_mmio_ring *coalesced_mmio_ring;
86     bool coalesced_flush_in_progress;
87     int vcpu_events;
88     int robust_singlestep;
89     int debugregs;
90 #ifdef KVM_CAP_SET_GUEST_DEBUG
91     QTAILQ_HEAD(, kvm_sw_breakpoint) kvm_sw_breakpoints;
92 #endif
93     int max_nested_state_len;
94     int many_ioeventfds;
95     int intx_set_mask;
96     int kvm_shadow_mem;
97     bool sync_mmu;
98     bool manual_dirty_log_protect;
99     /* The man page (and POSIX) say ioctl numbers are signed int, but
100      * they're not.  Linux, glibc and *BSD all treat ioctl numbers as
101      * unsigned, and treating them as signed here can break things */
102     unsigned irq_set_ioctl;
103     unsigned int sigmask_len;
104     GHashTable *gsimap;
105 #ifdef KVM_CAP_IRQ_ROUTING
106     struct kvm_irq_routing *irq_routes;
107     int nr_allocated_irq_routes;
108     unsigned long *used_gsi_bitmap;
109     unsigned int gsi_count;
110     QTAILQ_HEAD(, KVMMSIRoute) msi_hashtab[KVM_MSI_HASHTAB_SIZE];
111 #endif
112     KVMMemoryListener memory_listener;
113     QLIST_HEAD(, KVMParkedVcpu) kvm_parked_vcpus;
114 
115     /* memory encryption */
116     void *memcrypt_handle;
117     int (*memcrypt_encrypt_data)(void *handle, uint8_t *ptr, uint64_t len);
118 
119     /* For "info mtree -f" to tell if an MR is registered in KVM */
120     int nr_as;
121     struct KVMAs {
122         KVMMemoryListener *ml;
123         AddressSpace *as;
124     } *as;
125 };
126 
127 KVMState *kvm_state;
128 bool kvm_kernel_irqchip;
129 bool kvm_split_irqchip;
130 bool kvm_async_interrupts_allowed;
131 bool kvm_halt_in_kernel_allowed;
132 bool kvm_eventfds_allowed;
133 bool kvm_irqfds_allowed;
134 bool kvm_resamplefds_allowed;
135 bool kvm_msi_via_irqfd_allowed;
136 bool kvm_gsi_routing_allowed;
137 bool kvm_gsi_direct_mapping;
138 bool kvm_allowed;
139 bool kvm_readonly_mem_allowed;
140 bool kvm_vm_attributes_allowed;
141 bool kvm_direct_msi_allowed;
142 bool kvm_ioeventfd_any_length_allowed;
143 bool kvm_msi_use_devid;
144 static bool kvm_immediate_exit;
145 static hwaddr kvm_max_slot_size = ~0;
146 
147 static const KVMCapabilityInfo kvm_required_capabilites[] = {
148     KVM_CAP_INFO(USER_MEMORY),
149     KVM_CAP_INFO(DESTROY_MEMORY_REGION_WORKS),
150     KVM_CAP_INFO(JOIN_MEMORY_REGIONS_WORKS),
151     KVM_CAP_LAST_INFO
152 };
153 
154 static NotifierList kvm_irqchip_change_notifiers =
155     NOTIFIER_LIST_INITIALIZER(kvm_irqchip_change_notifiers);
156 
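/* Serialize access to a KVMMemoryListener's slot array (kml->slots). */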
157 #define kvm_slots_lock(kml)      qemu_mutex_lock(&(kml)->slots_lock)
158 #define kvm_slots_unlock(kml)    qemu_mutex_unlock(&(kml)->slots_lock)
159 
160 int kvm_get_max_memslots(void)
161 {
162     KVMState *s = KVM_STATE(current_machine->accelerator);
163 
164     return s->nr_slots;
165 }
166 
167 bool kvm_memcrypt_enabled(void)
168 {
169     if (kvm_state && kvm_state->memcrypt_handle) {
170         return true;
171     }
172 
173     return false;
174 }
175 
176 int kvm_memcrypt_encrypt_data(uint8_t *ptr, uint64_t len)
177 {
178     if (kvm_state->memcrypt_handle &&
179         kvm_state->memcrypt_encrypt_data) {
180         return kvm_state->memcrypt_encrypt_data(kvm_state->memcrypt_handle,
181                                               ptr, len);
182     }
183 
184     return 1;
185 }
186 
187 /* Called with KVMMemoryListener.slots_lock held */
188 static KVMSlot *kvm_get_free_slot(KVMMemoryListener *kml)
189 {
190     KVMState *s = kvm_state;
191     int i;
192 
193     for (i = 0; i < s->nr_slots; i++) {
194         if (kml->slots[i].memory_size == 0) {
195             return &kml->slots[i];
196         }
197     }
198 
199     return NULL;
200 }
201 
202 bool kvm_has_free_slot(MachineState *ms)
203 {
204     KVMState *s = KVM_STATE(ms->accelerator);
205     bool result;
206     KVMMemoryListener *kml = &s->memory_listener;
207 
208     kvm_slots_lock(kml);
209     result = !!kvm_get_free_slot(kml);
210     kvm_slots_unlock(kml);
211 
212     return result;
213 }
214 
215 /* Called with KVMMemoryListener.slots_lock held */
216 static KVMSlot *kvm_alloc_slot(KVMMemoryListener *kml)
217 {
218     KVMSlot *slot = kvm_get_free_slot(kml);
219 
220     if (slot) {
221         return slot;
222     }
223 
224     fprintf(stderr, "%s: no free slot available\n", __func__);
225     abort();
226 }
227 
228 static KVMSlot *kvm_lookup_matching_slot(KVMMemoryListener *kml,
229                                          hwaddr start_addr,
230                                          hwaddr size)
231 {
232     KVMState *s = kvm_state;
233     int i;
234 
235     for (i = 0; i < s->nr_slots; i++) {
236         KVMSlot *mem = &kml->slots[i];
237 
238         if (start_addr == mem->start_addr && size == mem->memory_size) {
239             return mem;
240         }
241     }
242 
243     return NULL;
244 }
245 
246 /*
247  * Calculate and align the start address and the size of the section.
248  * Return the size. If the size is 0, the aligned section is empty.
249  */
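/*
 * A worked example, assuming a 4 KiB host page size: a section at
 * offset_within_address_space 0x1234 with size 0x3000 yields *start = 0x2000
 * and a returned size of 0x2000; the 0xdcc bytes before the first page
 * boundary and the partial page at the end are dropped.
 */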
250 static hwaddr kvm_align_section(MemoryRegionSection *section,
251                                 hwaddr *start)
252 {
253     hwaddr size = int128_get64(section->size);
254     hwaddr delta, aligned;
255 
256     /* KVM works in page-size chunks, but this function may be called with a
257        sub-page size and an unaligned start address. Round the start address
258        up to the next page boundary and truncate the size down to the previous one. */
259     aligned = ROUND_UP(section->offset_within_address_space,
260                        qemu_real_host_page_size);
261     delta = aligned - section->offset_within_address_space;
262     *start = aligned;
263     if (delta > size) {
264         return 0;
265     }
266 
267     return (size - delta) & qemu_real_host_page_mask;
268 }
269 
270 int kvm_physical_memory_addr_from_host(KVMState *s, void *ram,
271                                        hwaddr *phys_addr)
272 {
273     KVMMemoryListener *kml = &s->memory_listener;
274     int i, ret = 0;
275 
276     kvm_slots_lock(kml);
277     for (i = 0; i < s->nr_slots; i++) {
278         KVMSlot *mem = &kml->slots[i];
279 
280         if (ram >= mem->ram && ram < mem->ram + mem->memory_size) {
281             *phys_addr = mem->start_addr + (ram - mem->ram);
282             ret = 1;
283             break;
284         }
285     }
286     kvm_slots_unlock(kml);
287 
288     return ret;
289 }
290 
291 static int kvm_set_user_memory_region(KVMMemoryListener *kml, KVMSlot *slot, bool new)
292 {
293     KVMState *s = kvm_state;
294     struct kvm_userspace_memory_region mem;
295     int ret;
296 
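    /*
     * The slot field encodes the slot number in its low 16 bits and the KVM
     * address space id in the bits above, as expected by
     * KVM_SET_USER_MEMORY_REGION.
     */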
297     mem.slot = slot->slot | (kml->as_id << 16);
298     mem.guest_phys_addr = slot->start_addr;
299     mem.userspace_addr = (unsigned long)slot->ram;
300     mem.flags = slot->flags;
301 
302     if (slot->memory_size && !new && (mem.flags ^ slot->old_flags) & KVM_MEM_READONLY) {
303         /* The KVM_MEM_READONLY flag cannot be toggled on a live slot, so first
304          * set the slot size to 0 (removing it); see KVM commit 75d61fbc. */
305         mem.memory_size = 0;
306         kvm_vm_ioctl(s, KVM_SET_USER_MEMORY_REGION, &mem);
307     }
308     mem.memory_size = slot->memory_size;
309     ret = kvm_vm_ioctl(s, KVM_SET_USER_MEMORY_REGION, &mem);
310     slot->old_flags = mem.flags;
311     trace_kvm_set_user_memory(mem.slot, mem.flags, mem.guest_phys_addr,
312                               mem.memory_size, mem.userspace_addr, ret);
313     return ret;
314 }
315 
316 int kvm_destroy_vcpu(CPUState *cpu)
317 {
318     KVMState *s = kvm_state;
319     long mmap_size;
320     struct KVMParkedVcpu *vcpu = NULL;
321     int ret = 0;
322 
323     DPRINTF("kvm_destroy_vcpu\n");
324 
325     ret = kvm_arch_destroy_vcpu(cpu);
326     if (ret < 0) {
327         goto err;
328     }
329 
330     mmap_size = kvm_ioctl(s, KVM_GET_VCPU_MMAP_SIZE, 0);
331     if (mmap_size < 0) {
332         ret = mmap_size;
333         DPRINTF("KVM_GET_VCPU_MMAP_SIZE failed\n");
334         goto err;
335     }
336 
337     ret = munmap(cpu->kvm_run, mmap_size);
338     if (ret < 0) {
339         goto err;
340     }
341 
342     vcpu = g_malloc0(sizeof(*vcpu));
343     vcpu->vcpu_id = kvm_arch_vcpu_id(cpu);
344     vcpu->kvm_fd = cpu->kvm_fd;
345     QLIST_INSERT_HEAD(&kvm_state->kvm_parked_vcpus, vcpu, node);
346 err:
347     return ret;
348 }
349 
350 static int kvm_get_vcpu(KVMState *s, unsigned long vcpu_id)
351 {
352     struct KVMParkedVcpu *cpu;
353 
354     QLIST_FOREACH(cpu, &s->kvm_parked_vcpus, node) {
355         if (cpu->vcpu_id == vcpu_id) {
356             int kvm_fd;
357 
358             QLIST_REMOVE(cpu, node);
359             kvm_fd = cpu->kvm_fd;
360             g_free(cpu);
361             return kvm_fd;
362         }
363     }
364 
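    /* No parked vCPU with this id exists, so ask KVM to create a new one. */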
365     return kvm_vm_ioctl(s, KVM_CREATE_VCPU, (void *)vcpu_id);
366 }
367 
368 int kvm_init_vcpu(CPUState *cpu)
369 {
370     KVMState *s = kvm_state;
371     long mmap_size;
372     int ret;
373 
374     DPRINTF("kvm_init_vcpu\n");
375 
376     ret = kvm_get_vcpu(s, kvm_arch_vcpu_id(cpu));
377     if (ret < 0) {
378         DPRINTF("kvm_create_vcpu failed\n");
379         goto err;
380     }
381 
382     cpu->kvm_fd = ret;
383     cpu->kvm_state = s;
384     cpu->vcpu_dirty = true;
385 
386     mmap_size = kvm_ioctl(s, KVM_GET_VCPU_MMAP_SIZE, 0);
387     if (mmap_size < 0) {
388         ret = mmap_size;
389         DPRINTF("KVM_GET_VCPU_MMAP_SIZE failed\n");
390         goto err;
391     }
392 
393     cpu->kvm_run = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE, MAP_SHARED,
394                         cpu->kvm_fd, 0);
395     if (cpu->kvm_run == MAP_FAILED) {
396         ret = -errno;
397         DPRINTF("mmap'ing vcpu state failed\n");
398         goto err;
399     }
400 
401     if (s->coalesced_mmio && !s->coalesced_mmio_ring) {
402         s->coalesced_mmio_ring =
403             (void *)cpu->kvm_run + s->coalesced_mmio * PAGE_SIZE;
404     }
405 
406     ret = kvm_arch_init_vcpu(cpu);
407 err:
408     return ret;
409 }
410 
411 /*
412  * dirty pages logging control
413  */
414 
415 static int kvm_mem_flags(MemoryRegion *mr)
416 {
417     bool readonly = mr->readonly || memory_region_is_romd(mr);
418     int flags = 0;
419 
420     if (memory_region_get_dirty_log_mask(mr) != 0) {
421         flags |= KVM_MEM_LOG_DIRTY_PAGES;
422     }
423     if (readonly && kvm_readonly_mem_allowed) {
424         flags |= KVM_MEM_READONLY;
425     }
426     return flags;
427 }
428 
429 /* Called with KVMMemoryListener.slots_lock held */
430 static int kvm_slot_update_flags(KVMMemoryListener *kml, KVMSlot *mem,
431                                  MemoryRegion *mr)
432 {
433     mem->flags = kvm_mem_flags(mr);
434 
435     /* If the flags did not effectively change, there is no need to issue the ioctl */
436     if (mem->flags == mem->old_flags) {
437         return 0;
438     }
439 
440     return kvm_set_user_memory_region(kml, mem, false);
441 }
442 
443 static int kvm_section_update_flags(KVMMemoryListener *kml,
444                                     MemoryRegionSection *section)
445 {
446     hwaddr start_addr, size, slot_size;
447     KVMSlot *mem;
448     int ret = 0;
449 
450     size = kvm_align_section(section, &start_addr);
451     if (!size) {
452         return 0;
453     }
454 
455     kvm_slots_lock(kml);
456 
457     while (size && !ret) {
458         slot_size = MIN(kvm_max_slot_size, size);
459         mem = kvm_lookup_matching_slot(kml, start_addr, slot_size);
460         if (!mem) {
461             /* We don't have a slot if we want to trap every access. */
462             goto out;
463         }
464 
465         ret = kvm_slot_update_flags(kml, mem, section->mr);
466         start_addr += slot_size;
467         size -= slot_size;
468     }
469 
470 out:
471     kvm_slots_unlock(kml);
472     return ret;
473 }
474 
475 static void kvm_log_start(MemoryListener *listener,
476                           MemoryRegionSection *section,
477                           int old, int new)
478 {
479     KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);
480     int r;
481 
482     if (old != 0) {
483         return;
484     }
485 
486     r = kvm_section_update_flags(kml, section);
487     if (r < 0) {
488         abort();
489     }
490 }
491 
492 static void kvm_log_stop(MemoryListener *listener,
493                           MemoryRegionSection *section,
494                           int old, int new)
495 {
496     KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);
497     int r;
498 
499     if (new != 0) {
500         return;
501     }
502 
503     r = kvm_section_update_flags(kml, section);
504     if (r < 0) {
505         abort();
506     }
507 }
508 
509 /* get kvm's dirty pages bitmap and update qemu's */
510 static int kvm_get_dirty_pages_log_range(MemoryRegionSection *section,
511                                          unsigned long *bitmap)
512 {
513     ram_addr_t start = section->offset_within_region +
514                        memory_region_get_ram_addr(section->mr);
515     ram_addr_t pages = int128_get64(section->size) / qemu_real_host_page_size;
516 
517     cpu_physical_memory_set_dirty_lebitmap(bitmap, start, pages);
518     return 0;
519 }
520 
521 #define ALIGN(x, y)  (((x)+(y)-1) & ~((y)-1))
522 
523 /* Allocate the dirty bitmap for a slot  */
524 static void kvm_memslot_init_dirty_bitmap(KVMSlot *mem)
525 {
526     /*
527      * XXX bad kernel interface alert
528      * For the dirty bitmap, the kernel allocates an array whose size is
529      * rounded up to bits-per-long.  But when the kernel is 64-bit and
530      * userspace is 32-bit, userspace cannot round up to the same
531      * bits-per-long, since sizeof(long) differs between kernel and
532      * user space.  Userspace would then provide a buffer that may be
533      * 4 bytes smaller than what the kernel uses, resulting in
534      * userspace memory corruption (which, in most cases, is not
535      * detectable by valgrind either).
536      * So for now, let's align to 64 instead of HOST_LONG_BITS here, in
537      * the hope that sizeof(long) won't become >8 any time soon.
538      */
539     hwaddr bitmap_size = ALIGN(((mem->memory_size) >> TARGET_PAGE_BITS),
540                                         /*HOST_LONG_BITS*/ 64) / 8;
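    /*
     * For example, a 1 GiB slot with 4 KiB target pages needs 262144 bits,
     * i.e. a 32 KiB allocation (already a multiple of 64 bits).
     */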
541     mem->dirty_bmap = g_malloc0(bitmap_size);
542 }
543 
544 /**
545  * kvm_physical_sync_dirty_bitmap - Sync dirty bitmap from kernel space
546  *
547  * This function first fetches the dirty bitmap from the kernel and then
548  * updates QEMU's dirty bitmap.
549  *
550  * NOTE: the caller must hold kml->slots_lock.
551  *
552  * @kml: the KVM memory listener object
553  * @section: the memory section to sync the dirty bitmap with
554  */
555 static int kvm_physical_sync_dirty_bitmap(KVMMemoryListener *kml,
556                                           MemoryRegionSection *section)
557 {
558     KVMState *s = kvm_state;
559     struct kvm_dirty_log d = {};
560     KVMSlot *mem;
561     hwaddr start_addr, size;
562     hwaddr slot_size, slot_offset = 0;
563     int ret = 0;
564 
565     size = kvm_align_section(section, &start_addr);
566     while (size) {
567         MemoryRegionSection subsection = *section;
568 
569         slot_size = MIN(kvm_max_slot_size, size);
570         mem = kvm_lookup_matching_slot(kml, start_addr, slot_size);
571         if (!mem) {
572             /* We don't have a slot if we want to trap every access. */
573             goto out;
574         }
575 
576         if (!mem->dirty_bmap) {
577             /* Allocate on the first log_sync, once and for all */
578             kvm_memslot_init_dirty_bitmap(mem);
579         }
580 
581         d.dirty_bitmap = mem->dirty_bmap;
582         d.slot = mem->slot | (kml->as_id << 16);
583         if (kvm_vm_ioctl(s, KVM_GET_DIRTY_LOG, &d) == -1) {
584             DPRINTF("ioctl failed %d\n", errno);
585             ret = -1;
586             goto out;
587         }
588 
589         subsection.offset_within_region += slot_offset;
590         subsection.size = int128_make64(slot_size);
591         kvm_get_dirty_pages_log_range(&subsection, d.dirty_bitmap);
592 
593         slot_offset += slot_size;
594         start_addr += slot_size;
595         size -= slot_size;
596     }
597 out:
598     return ret;
599 }
600 
601 /* Alignment requirement for KVM_CLEAR_DIRTY_LOG - 64 pages */
602 #define KVM_CLEAR_LOG_SHIFT  6
603 #define KVM_CLEAR_LOG_ALIGN  (qemu_real_host_page_size << KVM_CLEAR_LOG_SHIFT)
604 #define KVM_CLEAR_LOG_MASK   (-KVM_CLEAR_LOG_ALIGN)
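/*
 * With a 4 KiB host page size, for example, KVM_CLEAR_LOG_ALIGN is 256 KiB,
 * so KVM_CLEAR_LOG_MASK rounds a byte offset down to a 64-page boundary
 * before it is passed to KVM_CLEAR_DIRTY_LOG below.
 */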
605 
606 static int kvm_log_clear_one_slot(KVMSlot *mem, int as_id, uint64_t start,
607                                   uint64_t size)
608 {
609     KVMState *s = kvm_state;
610     uint64_t end, bmap_start, start_delta, bmap_npages;
611     struct kvm_clear_dirty_log d;
612     unsigned long *bmap_clear = NULL, psize = qemu_real_host_page_size;
613     int ret;
614 
615     /*
616      * We need to extend either the start or the size or both to
617      * satisfy the KVM interface requirement.  First, align the start
618      * address down to a 64-host-page boundary.
619      */
620     bmap_start = start & KVM_CLEAR_LOG_MASK;
621     start_delta = start - bmap_start;
622     bmap_start /= psize;
623 
624     /*
625      * The kernel interface places a restriction on the size too: either
626      *
627      * (1) the size is aligned to 64 host pages (just like the start), or
628      * (2) the size extends to the end of the KVM memslot.
629      */
630     bmap_npages = DIV_ROUND_UP(size + start_delta, KVM_CLEAR_LOG_ALIGN)
631         << KVM_CLEAR_LOG_SHIFT;
632     end = mem->memory_size / psize;
633     if (bmap_npages > end - bmap_start) {
634         bmap_npages = end - bmap_start;
635     }
636     start_delta /= psize;
637 
638     /*
639      * Prepare the bitmap to clear dirty bits.  Here we must guarantee
640      * that we won't clear any unknown dirty bits otherwise we might
641      * accidentally clear some set bits which are not yet synced from
642      * the kernel into QEMU's bitmap, then we'll lose track of the
643      * guest modifications upon those pages (which can directly lead
644      * to guest data loss or panic after migration).
645      *
646      * Layout of the KVMSlot.dirty_bmap:
647      *
648      *                   |<-------- bmap_npages -----------..>|
649      *                                                     [1]
650      *                     start_delta         size
651      *  |----------------|-------------|------------------|------------|
652      *  ^                ^             ^                               ^
653      *  |                |             |                               |
654      * start          bmap_start     (start)                         end
655      * of memslot                                             of memslot
656      *
657      * [1] bmap_npages can be aligned to either 64 pages or the end of slot
658      */
659 
660     assert(bmap_start % BITS_PER_LONG == 0);
661     /* We should never do log_clear before log_sync */
662     assert(mem->dirty_bmap);
663     if (start_delta) {
664         /* Slow path - we need to manipulate a temp bitmap */
665         bmap_clear = bitmap_new(bmap_npages);
666         bitmap_copy_with_src_offset(bmap_clear, mem->dirty_bmap,
667                                     bmap_start, start_delta + size / psize);
668         /*
669          * Zero the leading bits that the caller did not ask us to clear;
670          * they are only part of the range because we extended it for the
671          * 64-page alignment.
672          */
673         bitmap_clear(bmap_clear, 0, start_delta);
674         d.dirty_bitmap = bmap_clear;
675     } else {
676         /* Fast path - start address aligns well with BITS_PER_LONG */
677         d.dirty_bitmap = mem->dirty_bmap + BIT_WORD(bmap_start);
678     }
679 
680     d.first_page = bmap_start;
681     /* It should never overflow.  If it happens, say something */
682     assert(bmap_npages <= UINT32_MAX);
683     d.num_pages = bmap_npages;
684     d.slot = mem->slot | (as_id << 16);
685 
686     if (kvm_vm_ioctl(s, KVM_CLEAR_DIRTY_LOG, &d) == -1) {
687         ret = -errno;
688         error_report("%s: KVM_CLEAR_DIRTY_LOG failed, slot=%d, "
689                      "start=0x%"PRIx64", size=0x%"PRIx32", errno=%d",
690                      __func__, d.slot, (uint64_t)d.first_page,
691                      (uint32_t)d.num_pages, ret);
692     } else {
693         ret = 0;
694         trace_kvm_clear_dirty_log(d.slot, d.first_page, d.num_pages);
695     }
696 
697     /*
698      * After updating the remote dirty bitmap, also update the memslot's
699      * cached bitmap, so that if another user later clears the same
700      * region we know not to clear it again on the remote side, which
701      * would likewise lose data.
702      */
703     bitmap_clear(mem->dirty_bmap, bmap_start + start_delta,
704                  size / psize);
705     /* g_free() handles the NULL (fast path) case gracefully */
706     g_free(bmap_clear);
707     return ret;
708 }
709 
710 
711 /**
712  * kvm_physical_log_clear - Clear the kernel's dirty bitmap for range
713  *
714  * NOTE: this will be a no-op if we haven't enabled manual dirty log
715  * protection in the host kernel because in that case this operation
716  * will be done within log_sync().
717  *
718  * @kml:     the kvm memory listener
719  * @section: the memory range to clear dirty bitmap
720  */
721 static int kvm_physical_log_clear(KVMMemoryListener *kml,
722                                   MemoryRegionSection *section)
723 {
724     KVMState *s = kvm_state;
725     uint64_t start, size, offset, count;
726     KVMSlot *mem;
727     int ret = 0, i;
728 
729     if (!s->manual_dirty_log_protect) {
730         /* No need to do explicit clear */
731         return ret;
732     }
733 
734     start = section->offset_within_address_space;
735     size = int128_get64(section->size);
736 
737     if (!size) {
738         /* Nothing more we can do... */
739         return ret;
740     }
741 
742     kvm_slots_lock(kml);
743 
744     for (i = 0; i < s->nr_slots; i++) {
745         mem = &kml->slots[i];
746         /* Discard slots that are empty or do not overlap the section */
747         if (!mem->memory_size ||
748             mem->start_addr > start + size - 1 ||
749             start > mem->start_addr + mem->memory_size - 1) {
750             continue;
751         }
752 
753         if (start >= mem->start_addr) {
754             /* The slot starts before section or is aligned to it.  */
755             offset = start - mem->start_addr;
756             count = MIN(mem->memory_size - offset, size);
757         } else {
758             /* The slot starts after section.  */
759             offset = 0;
760             count = MIN(mem->memory_size, size - (mem->start_addr - start));
761         }
762         ret = kvm_log_clear_one_slot(mem, kml->as_id, offset, count);
763         if (ret < 0) {
764             break;
765         }
766     }
767 
768     kvm_slots_unlock(kml);
769 
770     return ret;
771 }
772 
773 static void kvm_coalesce_mmio_region(MemoryListener *listener,
774                                      MemoryRegionSection *section,
775                                      hwaddr start, hwaddr size)
776 {
777     KVMState *s = kvm_state;
778 
779     if (s->coalesced_mmio) {
780         struct kvm_coalesced_mmio_zone zone;
781 
782         zone.addr = start;
783         zone.size = size;
784         zone.pad = 0;
785 
786         (void)kvm_vm_ioctl(s, KVM_REGISTER_COALESCED_MMIO, &zone);
787     }
788 }
789 
790 static void kvm_uncoalesce_mmio_region(MemoryListener *listener,
791                                        MemoryRegionSection *section,
792                                        hwaddr start, hwaddr size)
793 {
794     KVMState *s = kvm_state;
795 
796     if (s->coalesced_mmio) {
797         struct kvm_coalesced_mmio_zone zone;
798 
799         zone.addr = start;
800         zone.size = size;
801         zone.pad = 0;
802 
803         (void)kvm_vm_ioctl(s, KVM_UNREGISTER_COALESCED_MMIO, &zone);
804     }
805 }
806 
807 static void kvm_coalesce_pio_add(MemoryListener *listener,
808                                 MemoryRegionSection *section,
809                                 hwaddr start, hwaddr size)
810 {
811     KVMState *s = kvm_state;
812 
813     if (s->coalesced_pio) {
814         struct kvm_coalesced_mmio_zone zone;
815 
816         zone.addr = start;
817         zone.size = size;
818         zone.pio = 1;
819 
820         (void)kvm_vm_ioctl(s, KVM_REGISTER_COALESCED_MMIO, &zone);
821     }
822 }
823 
824 static void kvm_coalesce_pio_del(MemoryListener *listener,
825                                 MemoryRegionSection *section,
826                                 hwaddr start, hwaddr size)
827 {
828     KVMState *s = kvm_state;
829 
830     if (s->coalesced_pio) {
831         struct kvm_coalesced_mmio_zone zone;
832 
833         zone.addr = start;
834         zone.size = size;
835         zone.pio = 1;
836 
837         (void)kvm_vm_ioctl(s, KVM_UNREGISTER_COALESCED_MMIO, &zone);
838     }
839 }
840 
841 static MemoryListener kvm_coalesced_pio_listener = {
842     .coalesced_io_add = kvm_coalesce_pio_add,
843     .coalesced_io_del = kvm_coalesce_pio_del,
844 };
845 
846 int kvm_check_extension(KVMState *s, unsigned int extension)
847 {
848     int ret;
849 
850     ret = kvm_ioctl(s, KVM_CHECK_EXTENSION, extension);
851     if (ret < 0) {
852         ret = 0;
853     }
854 
855     return ret;
856 }
857 
858 int kvm_vm_check_extension(KVMState *s, unsigned int extension)
859 {
860     int ret;
861 
862     ret = kvm_vm_ioctl(s, KVM_CHECK_EXTENSION, extension);
863     if (ret < 0) {
864         /* VM wide version not implemented, use global one instead */
865         ret = kvm_check_extension(s, extension);
866     }
867 
868     return ret;
869 }
870 
871 static uint32_t adjust_ioeventfd_endianness(uint32_t val, uint32_t size)
872 {
873 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
874     /* The kernel expects ioeventfd values in host endianness, but the
875      * memory core hands them to us in target endianness.  For example,
876      * PPC is always treated as big-endian even when KVM is running on a
877      * little-endian PPC64LE host.  Correct for that here.
878      */
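    /*
     * For example, a two-byte datamatch value 0x1234 for a big-endian target
     * on a little-endian host is swapped to 0x3412 here, so that its
     * in-memory byte pattern matches what the guest actually writes.
     */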
879     switch (size) {
880     case 2:
881         val = bswap16(val);
882         break;
883     case 4:
884         val = bswap32(val);
885         break;
886     }
887 #endif
888     return val;
889 }
890 
891 static int kvm_set_ioeventfd_mmio(int fd, hwaddr addr, uint32_t val,
892                                   bool assign, uint32_t size, bool datamatch)
893 {
894     int ret;
895     struct kvm_ioeventfd iofd = {
896         .datamatch = datamatch ? adjust_ioeventfd_endianness(val, size) : 0,
897         .addr = addr,
898         .len = size,
899         .flags = 0,
900         .fd = fd,
901     };
902 
903     trace_kvm_set_ioeventfd_mmio(fd, (uint64_t)addr, val, assign, size,
904                                  datamatch);
905     if (!kvm_enabled()) {
906         return -ENOSYS;
907     }
908 
909     if (datamatch) {
910         iofd.flags |= KVM_IOEVENTFD_FLAG_DATAMATCH;
911     }
912     if (!assign) {
913         iofd.flags |= KVM_IOEVENTFD_FLAG_DEASSIGN;
914     }
915 
916     ret = kvm_vm_ioctl(kvm_state, KVM_IOEVENTFD, &iofd);
917 
918     if (ret < 0) {
919         return -errno;
920     }
921 
922     return 0;
923 }
924 
925 static int kvm_set_ioeventfd_pio(int fd, uint16_t addr, uint16_t val,
926                                  bool assign, uint32_t size, bool datamatch)
927 {
928     struct kvm_ioeventfd kick = {
929         .datamatch = datamatch ? adjust_ioeventfd_endianness(val, size) : 0,
930         .addr = addr,
931         .flags = KVM_IOEVENTFD_FLAG_PIO,
932         .len = size,
933         .fd = fd,
934     };
935     int r;
936     trace_kvm_set_ioeventfd_pio(fd, addr, val, assign, size, datamatch);
937     if (!kvm_enabled()) {
938         return -ENOSYS;
939     }
940     if (datamatch) {
941         kick.flags |= KVM_IOEVENTFD_FLAG_DATAMATCH;
942     }
943     if (!assign) {
944         kick.flags |= KVM_IOEVENTFD_FLAG_DEASSIGN;
945     }
946     r = kvm_vm_ioctl(kvm_state, KVM_IOEVENTFD, &kick);
947     if (r < 0) {
948         return r;
949     }
950     return 0;
951 }
952 
953 
954 static int kvm_check_many_ioeventfds(void)
955 {
956     /* Userspace can use ioeventfd for io notification.  This requires a host
957      * that supports eventfd(2) and an I/O thread; since eventfd does not
958      * support SIGIO it cannot interrupt the vcpu.
959      *
960      * Older kernels have a 6-device limit on the KVM I/O bus.  Probe for the
961      * limit so we can avoid creating too many ioeventfds.
962      */
963 #if defined(CONFIG_EVENTFD)
964     int ioeventfds[7];
965     int i, ret = 0;
966     for (i = 0; i < ARRAY_SIZE(ioeventfds); i++) {
967         ioeventfds[i] = eventfd(0, EFD_CLOEXEC);
968         if (ioeventfds[i] < 0) {
969             break;
970         }
971         ret = kvm_set_ioeventfd_pio(ioeventfds[i], 0, i, true, 2, true);
972         if (ret < 0) {
973             close(ioeventfds[i]);
974             break;
975         }
976     }
977 
978     /* Decide whether many devices are supported or not */
979     ret = i == ARRAY_SIZE(ioeventfds);
980 
981     while (i-- > 0) {
982         kvm_set_ioeventfd_pio(ioeventfds[i], 0, i, false, 2, true);
983         close(ioeventfds[i]);
984     }
985     return ret;
986 #else
987     return 0;
988 #endif
989 }
990 
991 static const KVMCapabilityInfo *
992 kvm_check_extension_list(KVMState *s, const KVMCapabilityInfo *list)
993 {
994     while (list->name) {
995         if (!kvm_check_extension(s, list->value)) {
996             return list;
997         }
998         list++;
999     }
1000     return NULL;
1001 }
1002 
1003 void kvm_set_max_memslot_size(hwaddr max_slot_size)
1004 {
1005     g_assert(
1006         ROUND_UP(max_slot_size, qemu_real_host_page_size) == max_slot_size
1007     );
1008     kvm_max_slot_size = max_slot_size;
1009 }
1010 
1011 static void kvm_set_phys_mem(KVMMemoryListener *kml,
1012                              MemoryRegionSection *section, bool add)
1013 {
1014     KVMSlot *mem;
1015     int err;
1016     MemoryRegion *mr = section->mr;
1017     bool writeable = !mr->readonly && !mr->rom_device;
1018     hwaddr start_addr, size, slot_size;
1019     void *ram;
1020 
1021     if (!memory_region_is_ram(mr)) {
1022         if (writeable || !kvm_readonly_mem_allowed) {
1023             return;
1024         } else if (!mr->romd_mode) {
1025             /* If the memory device is not in romd_mode, then we actually want
1026              * to remove the kvm memory slot so all accesses will trap. */
1027             add = false;
1028         }
1029     }
1030 
1031     size = kvm_align_section(section, &start_addr);
1032     if (!size) {
1033         return;
1034     }
1035 
1036     /* use aligned delta to align the ram address */
1037     ram = memory_region_get_ram_ptr(mr) + section->offset_within_region +
1038           (start_addr - section->offset_within_address_space);
1039 
1040     kvm_slots_lock(kml);
1041 
1042     if (!add) {
1043         do {
1044             slot_size = MIN(kvm_max_slot_size, size);
1045             mem = kvm_lookup_matching_slot(kml, start_addr, slot_size);
1046             if (!mem) {
1047                 goto out;
1048             }
1049             if (mem->flags & KVM_MEM_LOG_DIRTY_PAGES) {
1050                 kvm_physical_sync_dirty_bitmap(kml, section);
1051             }
1052 
1053             /* unregister the slot */
1054             g_free(mem->dirty_bmap);
1055             mem->dirty_bmap = NULL;
1056             mem->memory_size = 0;
1057             mem->flags = 0;
1058             err = kvm_set_user_memory_region(kml, mem, false);
1059             if (err) {
1060                 fprintf(stderr, "%s: error unregistering slot: %s\n",
1061                         __func__, strerror(-err));
1062                 abort();
1063             }
1064             start_addr += slot_size;
1065             size -= slot_size;
1066         } while (size);
1067         goto out;
1068     }
1069 
1070     /* register the new slot */
1071     do {
1072         slot_size = MIN(kvm_max_slot_size, size);
1073         mem = kvm_alloc_slot(kml);
1074         mem->memory_size = slot_size;
1075         mem->start_addr = start_addr;
1076         mem->ram = ram;
1077         mem->flags = kvm_mem_flags(mr);
1078 
1079         if (mem->flags & KVM_MEM_LOG_DIRTY_PAGES) {
1080             /*
1081              * Allocate the dirty bitmap up front so that it does not
1082              * disappear in the middle of a migration.
1083              */
1084             kvm_memslot_init_dirty_bitmap(mem);
1085         }
1086         err = kvm_set_user_memory_region(kml, mem, true);
1087         if (err) {
1088             fprintf(stderr, "%s: error registering slot: %s\n", __func__,
1089                     strerror(-err));
1090             abort();
1091         }
1092         start_addr += slot_size;
1093         ram += slot_size;
1094         size -= slot_size;
1095     } while (size);
1096 
1097 out:
1098     kvm_slots_unlock(kml);
1099 }
1100 
1101 static void kvm_region_add(MemoryListener *listener,
1102                            MemoryRegionSection *section)
1103 {
1104     KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);
1105 
1106     memory_region_ref(section->mr);
1107     kvm_set_phys_mem(kml, section, true);
1108 }
1109 
1110 static void kvm_region_del(MemoryListener *listener,
1111                            MemoryRegionSection *section)
1112 {
1113     KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);
1114 
1115     kvm_set_phys_mem(kml, section, false);
1116     memory_region_unref(section->mr);
1117 }
1118 
1119 static void kvm_log_sync(MemoryListener *listener,
1120                          MemoryRegionSection *section)
1121 {
1122     KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);
1123     int r;
1124 
1125     kvm_slots_lock(kml);
1126     r = kvm_physical_sync_dirty_bitmap(kml, section);
1127     kvm_slots_unlock(kml);
1128     if (r < 0) {
1129         abort();
1130     }
1131 }
1132 
1133 static void kvm_log_clear(MemoryListener *listener,
1134                           MemoryRegionSection *section)
1135 {
1136     KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);
1137     int r;
1138 
1139     r = kvm_physical_log_clear(kml, section);
1140     if (r < 0) {
1141         error_report_once("%s: kvm log clear failed: mr=%s "
1142                           "offset=%"HWADDR_PRIx" size=%"PRIx64, __func__,
1143                           section->mr->name, section->offset_within_region,
1144                           int128_get64(section->size));
1145         abort();
1146     }
1147 }
1148 
1149 static void kvm_mem_ioeventfd_add(MemoryListener *listener,
1150                                   MemoryRegionSection *section,
1151                                   bool match_data, uint64_t data,
1152                                   EventNotifier *e)
1153 {
1154     int fd = event_notifier_get_fd(e);
1155     int r;
1156 
1157     r = kvm_set_ioeventfd_mmio(fd, section->offset_within_address_space,
1158                                data, true, int128_get64(section->size),
1159                                match_data);
1160     if (r < 0) {
1161         fprintf(stderr, "%s: error adding ioeventfd: %s (%d)\n",
1162                 __func__, strerror(-r), -r);
1163         abort();
1164     }
1165 }
1166 
1167 static void kvm_mem_ioeventfd_del(MemoryListener *listener,
1168                                   MemoryRegionSection *section,
1169                                   bool match_data, uint64_t data,
1170                                   EventNotifier *e)
1171 {
1172     int fd = event_notifier_get_fd(e);
1173     int r;
1174 
1175     r = kvm_set_ioeventfd_mmio(fd, section->offset_within_address_space,
1176                                data, false, int128_get64(section->size),
1177                                match_data);
1178     if (r < 0) {
1179         fprintf(stderr, "%s: error deleting ioeventfd: %s (%d)\n",
1180                 __func__, strerror(-r), -r);
1181         abort();
1182     }
1183 }
1184 
1185 static void kvm_io_ioeventfd_add(MemoryListener *listener,
1186                                  MemoryRegionSection *section,
1187                                  bool match_data, uint64_t data,
1188                                  EventNotifier *e)
1189 {
1190     int fd = event_notifier_get_fd(e);
1191     int r;
1192 
1193     r = kvm_set_ioeventfd_pio(fd, section->offset_within_address_space,
1194                               data, true, int128_get64(section->size),
1195                               match_data);
1196     if (r < 0) {
1197         fprintf(stderr, "%s: error adding ioeventfd: %s (%d)\n",
1198                 __func__, strerror(-r), -r);
1199         abort();
1200     }
1201 }
1202 
1203 static void kvm_io_ioeventfd_del(MemoryListener *listener,
1204                                  MemoryRegionSection *section,
1205                                  bool match_data, uint64_t data,
1206                                  EventNotifier *e)
1208 {
1209     int fd = event_notifier_get_fd(e);
1210     int r;
1211 
1212     r = kvm_set_ioeventfd_pio(fd, section->offset_within_address_space,
1213                               data, false, int128_get64(section->size),
1214                               match_data);
1215     if (r < 0) {
1216         fprintf(stderr, "%s: error deleting ioeventfd: %s (%d)\n",
1217                 __func__, strerror(-r), -r);
1218         abort();
1219     }
1220 }
1221 
1222 void kvm_memory_listener_register(KVMState *s, KVMMemoryListener *kml,
1223                                   AddressSpace *as, int as_id)
1224 {
1225     int i;
1226 
1227     qemu_mutex_init(&kml->slots_lock);
1228     kml->slots = g_malloc0(s->nr_slots * sizeof(KVMSlot));
1229     kml->as_id = as_id;
1230 
1231     for (i = 0; i < s->nr_slots; i++) {
1232         kml->slots[i].slot = i;
1233     }
1234 
1235     kml->listener.region_add = kvm_region_add;
1236     kml->listener.region_del = kvm_region_del;
1237     kml->listener.log_start = kvm_log_start;
1238     kml->listener.log_stop = kvm_log_stop;
1239     kml->listener.log_sync = kvm_log_sync;
1240     kml->listener.log_clear = kvm_log_clear;
1241     kml->listener.priority = 10;
1242 
1243     memory_listener_register(&kml->listener, as);
1244 
1245     for (i = 0; i < s->nr_as; ++i) {
1246         if (!s->as[i].as) {
1247             s->as[i].as = as;
1248             s->as[i].ml = kml;
1249             break;
1250         }
1251     }
1252 }
1253 
1254 static MemoryListener kvm_io_listener = {
1255     .eventfd_add = kvm_io_ioeventfd_add,
1256     .eventfd_del = kvm_io_ioeventfd_del,
1257     .priority = 10,
1258 };
1259 
1260 int kvm_set_irq(KVMState *s, int irq, int level)
1261 {
1262     struct kvm_irq_level event;
1263     int ret;
1264 
1265     assert(kvm_async_interrupts_enabled());
1266 
1267     event.level = level;
1268     event.irq = irq;
1269     ret = kvm_vm_ioctl(s, s->irq_set_ioctl, &event);
1270     if (ret < 0) {
1271         perror("kvm_set_irq");
1272         abort();
1273     }
1274 
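    /*
     * With KVM_IRQ_LINE_STATUS the kernel reports the delivery status in
     * event.status (e.g. whether the interrupt was coalesced); plain
     * KVM_IRQ_LINE carries no such status, so simply report success.
     */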
1275     return (s->irq_set_ioctl == KVM_IRQ_LINE) ? 1 : event.status;
1276 }
1277 
1278 #ifdef KVM_CAP_IRQ_ROUTING
1279 typedef struct KVMMSIRoute {
1280     struct kvm_irq_routing_entry kroute;
1281     QTAILQ_ENTRY(KVMMSIRoute) entry;
1282 } KVMMSIRoute;
1283 
1284 static void set_gsi(KVMState *s, unsigned int gsi)
1285 {
1286     set_bit(gsi, s->used_gsi_bitmap);
1287 }
1288 
1289 static void clear_gsi(KVMState *s, unsigned int gsi)
1290 {
1291     clear_bit(gsi, s->used_gsi_bitmap);
1292 }
1293 
1294 void kvm_init_irq_routing(KVMState *s)
1295 {
1296     int gsi_count, i;
1297 
1298     gsi_count = kvm_check_extension(s, KVM_CAP_IRQ_ROUTING) - 1;
1299     if (gsi_count > 0) {
1300         /* Round up so we can search ints using ffs */
1301         s->used_gsi_bitmap = bitmap_new(gsi_count);
1302         s->gsi_count = gsi_count;
1303     }
1304 
1305     s->irq_routes = g_malloc0(sizeof(*s->irq_routes));
1306     s->nr_allocated_irq_routes = 0;
1307 
1308     if (!kvm_direct_msi_allowed) {
1309         for (i = 0; i < KVM_MSI_HASHTAB_SIZE; i++) {
1310             QTAILQ_INIT(&s->msi_hashtab[i]);
1311         }
1312     }
1313 
1314     kvm_arch_init_irq_routing(s);
1315 }
1316 
1317 void kvm_irqchip_commit_routes(KVMState *s)
1318 {
1319     int ret;
1320 
1321     if (kvm_gsi_direct_mapping()) {
1322         return;
1323     }
1324 
1325     if (!kvm_gsi_routing_enabled()) {
1326         return;
1327     }
1328 
1329     s->irq_routes->flags = 0;
1330     trace_kvm_irqchip_commit_routes();
1331     ret = kvm_vm_ioctl(s, KVM_SET_GSI_ROUTING, s->irq_routes);
1332     assert(ret == 0);
1333 }
1334 
1335 static void kvm_add_routing_entry(KVMState *s,
1336                                   struct kvm_irq_routing_entry *entry)
1337 {
1338     struct kvm_irq_routing_entry *new;
1339     int n, size;
1340 
1341     if (s->irq_routes->nr == s->nr_allocated_irq_routes) {
1342         n = s->nr_allocated_irq_routes * 2;
1343         if (n < 64) {
1344             n = 64;
1345         }
1346         size = sizeof(struct kvm_irq_routing);
1347         size += n * sizeof(*new);
1348         s->irq_routes = g_realloc(s->irq_routes, size);
1349         s->nr_allocated_irq_routes = n;
1350     }
1351     n = s->irq_routes->nr++;
1352     new = &s->irq_routes->entries[n];
1353 
1354     *new = *entry;
1355 
1356     set_gsi(s, entry->gsi);
1357 }
1358 
1359 static int kvm_update_routing_entry(KVMState *s,
1360                                     struct kvm_irq_routing_entry *new_entry)
1361 {
1362     struct kvm_irq_routing_entry *entry;
1363     int n;
1364 
1365     for (n = 0; n < s->irq_routes->nr; n++) {
1366         entry = &s->irq_routes->entries[n];
1367         if (entry->gsi != new_entry->gsi) {
1368             continue;
1369         }
1370 
1371         if (!memcmp(entry, new_entry, sizeof *entry)) {
1372             return 0;
1373         }
1374 
1375         *entry = *new_entry;
1376 
1377         return 0;
1378     }
1379 
1380     return -ESRCH;
1381 }
1382 
1383 void kvm_irqchip_add_irq_route(KVMState *s, int irq, int irqchip, int pin)
1384 {
1385     struct kvm_irq_routing_entry e = {};
1386 
1387     assert(pin < s->gsi_count);
1388 
1389     e.gsi = irq;
1390     e.type = KVM_IRQ_ROUTING_IRQCHIP;
1391     e.flags = 0;
1392     e.u.irqchip.irqchip = irqchip;
1393     e.u.irqchip.pin = pin;
1394     kvm_add_routing_entry(s, &e);
1395 }
1396 
1397 void kvm_irqchip_release_virq(KVMState *s, int virq)
1398 {
1399     struct kvm_irq_routing_entry *e;
1400     int i;
1401 
1402     if (kvm_gsi_direct_mapping()) {
1403         return;
1404     }
1405 
1406     for (i = 0; i < s->irq_routes->nr; i++) {
1407         e = &s->irq_routes->entries[i];
1408         if (e->gsi == virq) {
1409             s->irq_routes->nr--;
1410             *e = s->irq_routes->entries[s->irq_routes->nr];
1411         }
1412     }
1413     clear_gsi(s, virq);
1414     kvm_arch_release_virq_post(virq);
1415     trace_kvm_irqchip_release_virq(virq);
1416 }
1417 
1418 void kvm_irqchip_add_change_notifier(Notifier *n)
1419 {
1420     notifier_list_add(&kvm_irqchip_change_notifiers, n);
1421 }
1422 
1423 void kvm_irqchip_remove_change_notifier(Notifier *n)
1424 {
1425     notifier_remove(n);
1426 }
1427 
1428 void kvm_irqchip_change_notify(void)
1429 {
1430     notifier_list_notify(&kvm_irqchip_change_notifiers, NULL);
1431 }
1432 
1433 static unsigned int kvm_hash_msi(uint32_t data)
1434 {
1435     /* This is optimized for IA32 MSI layout. However, no other arch shall
1436      * repeat the mistake of not providing a direct MSI injection API. */
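    /* On x86 the low byte of the MSI data register holds the vector number,
     * so this effectively buckets routes by vector. */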
1437     return data & 0xff;
1438 }
1439 
1440 static void kvm_flush_dynamic_msi_routes(KVMState *s)
1441 {
1442     KVMMSIRoute *route, *next;
1443     unsigned int hash;
1444 
1445     for (hash = 0; hash < KVM_MSI_HASHTAB_SIZE; hash++) {
1446         QTAILQ_FOREACH_SAFE(route, &s->msi_hashtab[hash], entry, next) {
1447             kvm_irqchip_release_virq(s, route->kroute.gsi);
1448             QTAILQ_REMOVE(&s->msi_hashtab[hash], route, entry);
1449             g_free(route);
1450         }
1451     }
1452 }
1453 
1454 static int kvm_irqchip_get_virq(KVMState *s)
1455 {
1456     int next_virq;
1457 
1458     /*
1459      * PIC and IOAPIC share the first 16 GSI numbers, so there are more
1460      * available GSI numbers than IRQ route entries. Allocating a GSI
1461      * number can succeed even though a new route entry cannot be added.
1462      * When this happens, flush dynamic MSI entries to free IRQ route entries.
1463      */
1464     if (!kvm_direct_msi_allowed && s->irq_routes->nr == s->gsi_count) {
1465         kvm_flush_dynamic_msi_routes(s);
1466     }
1467 
1468     /* Return the lowest unused GSI in the bitmap */
1469     next_virq = find_first_zero_bit(s->used_gsi_bitmap, s->gsi_count);
1470     if (next_virq >= s->gsi_count) {
1471         return -ENOSPC;
1472     } else {
1473         return next_virq;
1474     }
1475 }
1476 
1477 static KVMMSIRoute *kvm_lookup_msi_route(KVMState *s, MSIMessage msg)
1478 {
1479     unsigned int hash = kvm_hash_msi(msg.data);
1480     KVMMSIRoute *route;
1481 
1482     QTAILQ_FOREACH(route, &s->msi_hashtab[hash], entry) {
1483         if (route->kroute.u.msi.address_lo == (uint32_t)msg.address &&
1484             route->kroute.u.msi.address_hi == (msg.address >> 32) &&
1485             route->kroute.u.msi.data == le32_to_cpu(msg.data)) {
1486             return route;
1487         }
1488     }
1489     return NULL;
1490 }
1491 
1492 int kvm_irqchip_send_msi(KVMState *s, MSIMessage msg)
1493 {
1494     struct kvm_msi msi;
1495     KVMMSIRoute *route;
1496 
1497     if (kvm_direct_msi_allowed) {
1498         msi.address_lo = (uint32_t)msg.address;
1499         msi.address_hi = msg.address >> 32;
1500         msi.data = le32_to_cpu(msg.data);
1501         msi.flags = 0;
1502         memset(msi.pad, 0, sizeof(msi.pad));
1503 
1504         return kvm_vm_ioctl(s, KVM_SIGNAL_MSI, &msi);
1505     }
1506 
1507     route = kvm_lookup_msi_route(s, msg);
1508     if (!route) {
1509         int virq;
1510 
1511         virq = kvm_irqchip_get_virq(s);
1512         if (virq < 0) {
1513             return virq;
1514         }
1515 
1516         route = g_malloc0(sizeof(KVMMSIRoute));
1517         route->kroute.gsi = virq;
1518         route->kroute.type = KVM_IRQ_ROUTING_MSI;
1519         route->kroute.flags = 0;
1520         route->kroute.u.msi.address_lo = (uint32_t)msg.address;
1521         route->kroute.u.msi.address_hi = msg.address >> 32;
1522         route->kroute.u.msi.data = le32_to_cpu(msg.data);
1523 
1524         kvm_add_routing_entry(s, &route->kroute);
1525         kvm_irqchip_commit_routes(s);
1526 
1527         QTAILQ_INSERT_TAIL(&s->msi_hashtab[kvm_hash_msi(msg.data)], route,
1528                            entry);
1529     }
1530 
1531     assert(route->kroute.type == KVM_IRQ_ROUTING_MSI);
1532 
1533     return kvm_set_irq(s, route->kroute.gsi, 1);
1534 }
1535 
1536 int kvm_irqchip_add_msi_route(KVMState *s, int vector, PCIDevice *dev)
1537 {
1538     struct kvm_irq_routing_entry kroute = {};
1539     int virq;
1540     MSIMessage msg = {0, 0};
1541 
1542     if (pci_available && dev) {
1543         msg = pci_get_msi_message(dev, vector);
1544     }
1545 
1546     if (kvm_gsi_direct_mapping()) {
1547         return kvm_arch_msi_data_to_gsi(msg.data);
1548     }
1549 
1550     if (!kvm_gsi_routing_enabled()) {
1551         return -ENOSYS;
1552     }
1553 
1554     virq = kvm_irqchip_get_virq(s);
1555     if (virq < 0) {
1556         return virq;
1557     }
1558 
1559     kroute.gsi = virq;
1560     kroute.type = KVM_IRQ_ROUTING_MSI;
1561     kroute.flags = 0;
1562     kroute.u.msi.address_lo = (uint32_t)msg.address;
1563     kroute.u.msi.address_hi = msg.address >> 32;
1564     kroute.u.msi.data = le32_to_cpu(msg.data);
1565     if (pci_available && kvm_msi_devid_required()) {
1566         kroute.flags = KVM_MSI_VALID_DEVID;
1567         kroute.u.msi.devid = pci_requester_id(dev);
1568     }
1569     if (kvm_arch_fixup_msi_route(&kroute, msg.address, msg.data, dev)) {
1570         kvm_irqchip_release_virq(s, virq);
1571         return -EINVAL;
1572     }
1573 
1574     trace_kvm_irqchip_add_msi_route(dev ? dev->name : (char *)"N/A",
1575                                     vector, virq);
1576 
1577     kvm_add_routing_entry(s, &kroute);
1578     kvm_arch_add_msi_route_post(&kroute, vector, dev);
1579     kvm_irqchip_commit_routes(s);
1580 
1581     return virq;
1582 }
1583 
1584 int kvm_irqchip_update_msi_route(KVMState *s, int virq, MSIMessage msg,
1585                                  PCIDevice *dev)
1586 {
1587     struct kvm_irq_routing_entry kroute = {};
1588 
1589     if (kvm_gsi_direct_mapping()) {
1590         return 0;
1591     }
1592 
1593     if (!kvm_irqchip_in_kernel()) {
1594         return -ENOSYS;
1595     }
1596 
1597     kroute.gsi = virq;
1598     kroute.type = KVM_IRQ_ROUTING_MSI;
1599     kroute.flags = 0;
1600     kroute.u.msi.address_lo = (uint32_t)msg.address;
1601     kroute.u.msi.address_hi = msg.address >> 32;
1602     kroute.u.msi.data = le32_to_cpu(msg.data);
1603     if (pci_available && kvm_msi_devid_required()) {
1604         kroute.flags = KVM_MSI_VALID_DEVID;
1605         kroute.u.msi.devid = pci_requester_id(dev);
1606     }
1607     if (kvm_arch_fixup_msi_route(&kroute, msg.address, msg.data, dev)) {
1608         return -EINVAL;
1609     }
1610 
1611     trace_kvm_irqchip_update_msi_route(virq);
1612 
1613     return kvm_update_routing_entry(s, &kroute);
1614 }
1615 
1616 static int kvm_irqchip_assign_irqfd(KVMState *s, int fd, int rfd, int virq,
1617                                     bool assign)
1618 {
1619     struct kvm_irqfd irqfd = {
1620         .fd = fd,
1621         .gsi = virq,
1622         .flags = assign ? 0 : KVM_IRQFD_FLAG_DEASSIGN,
1623     };
1624 
1625     if (rfd != -1) {
1626         irqfd.flags |= KVM_IRQFD_FLAG_RESAMPLE;
1627         irqfd.resamplefd = rfd;
1628     }
1629 
1630     if (!kvm_irqfds_enabled()) {
1631         return -ENOSYS;
1632     }
1633 
1634     return kvm_vm_ioctl(s, KVM_IRQFD, &irqfd);
1635 }
1636 
1637 int kvm_irqchip_add_adapter_route(KVMState *s, AdapterInfo *adapter)
1638 {
1639     struct kvm_irq_routing_entry kroute = {};
1640     int virq;
1641 
1642     if (!kvm_gsi_routing_enabled()) {
1643         return -ENOSYS;
1644     }
1645 
1646     virq = kvm_irqchip_get_virq(s);
1647     if (virq < 0) {
1648         return virq;
1649     }
1650 
1651     kroute.gsi = virq;
1652     kroute.type = KVM_IRQ_ROUTING_S390_ADAPTER;
1653     kroute.flags = 0;
1654     kroute.u.adapter.summary_addr = adapter->summary_addr;
1655     kroute.u.adapter.ind_addr = adapter->ind_addr;
1656     kroute.u.adapter.summary_offset = adapter->summary_offset;
1657     kroute.u.adapter.ind_offset = adapter->ind_offset;
1658     kroute.u.adapter.adapter_id = adapter->adapter_id;
1659 
1660     kvm_add_routing_entry(s, &kroute);
1661 
1662     return virq;
1663 }
1664 
1665 int kvm_irqchip_add_hv_sint_route(KVMState *s, uint32_t vcpu, uint32_t sint)
1666 {
1667     struct kvm_irq_routing_entry kroute = {};
1668     int virq;
1669 
1670     if (!kvm_gsi_routing_enabled()) {
1671         return -ENOSYS;
1672     }
1673     if (!kvm_check_extension(s, KVM_CAP_HYPERV_SYNIC)) {
1674         return -ENOSYS;
1675     }
1676     virq = kvm_irqchip_get_virq(s);
1677     if (virq < 0) {
1678         return virq;
1679     }
1680 
1681     kroute.gsi = virq;
1682     kroute.type = KVM_IRQ_ROUTING_HV_SINT;
1683     kroute.flags = 0;
1684     kroute.u.hv_sint.vcpu = vcpu;
1685     kroute.u.hv_sint.sint = sint;
1686 
1687     kvm_add_routing_entry(s, &kroute);
1688     kvm_irqchip_commit_routes(s);
1689 
1690     return virq;
1691 }
1692 
1693 #else /* !KVM_CAP_IRQ_ROUTING */
1694 
1695 void kvm_init_irq_routing(KVMState *s)
1696 {
1697 }
1698 
1699 void kvm_irqchip_release_virq(KVMState *s, int virq)
1700 {
1701 }
1702 
1703 int kvm_irqchip_send_msi(KVMState *s, MSIMessage msg)
1704 {
1705     abort();
1706 }
1707 
1708 int kvm_irqchip_add_msi_route(KVMState *s, int vector, PCIDevice *dev)
1709 {
1710     return -ENOSYS;
1711 }
1712 
1713 int kvm_irqchip_add_adapter_route(KVMState *s, AdapterInfo *adapter)
1714 {
1715     return -ENOSYS;
1716 }
1717 
1718 int kvm_irqchip_add_hv_sint_route(KVMState *s, uint32_t vcpu, uint32_t sint)
1719 {
1720     return -ENOSYS;
1721 }
1722 
1723 static int kvm_irqchip_assign_irqfd(KVMState *s, int fd, int rfd, int virq, bool assign)
1724 {
1725     abort();
1726 }
1727 
1728 int kvm_irqchip_update_msi_route(KVMState *s, int virq, MSIMessage msg, PCIDevice *dev)
1729 {
1730     return -ENOSYS;
1731 }
1732 #endif /* !KVM_CAP_IRQ_ROUTING */
1733 
1734 int kvm_irqchip_add_irqfd_notifier_gsi(KVMState *s, EventNotifier *n,
1735                                        EventNotifier *rn, int virq)
1736 {
1737     return kvm_irqchip_assign_irqfd(s, event_notifier_get_fd(n),
1738            rn ? event_notifier_get_fd(rn) : -1, virq, true);
1739 }
1740 
1741 int kvm_irqchip_remove_irqfd_notifier_gsi(KVMState *s, EventNotifier *n,
1742                                           int virq)
1743 {
1744     return kvm_irqchip_assign_irqfd(s, event_notifier_get_fd(n), -1, virq,
1745            false);
1746 }
1747 
1748 int kvm_irqchip_add_irqfd_notifier(KVMState *s, EventNotifier *n,
1749                                    EventNotifier *rn, qemu_irq irq)
1750 {
1751     gpointer key, gsi;
1752     gboolean found = g_hash_table_lookup_extended(s->gsimap, irq, &key, &gsi);
1753 
1754     if (!found) {
1755         return -ENXIO;
1756     }
1757     return kvm_irqchip_add_irqfd_notifier_gsi(s, n, rn, GPOINTER_TO_INT(gsi));
1758 }
1759 
1760 int kvm_irqchip_remove_irqfd_notifier(KVMState *s, EventNotifier *n,
1761                                       qemu_irq irq)
1762 {
1763     gpointer key, gsi;
1764     gboolean found = g_hash_table_lookup_extended(s->gsimap, irq, &key, &gsi);
1765 
1766     if (!found) {
1767         return -ENXIO;
1768     }
1769     return kvm_irqchip_remove_irqfd_notifier_gsi(s, n, GPOINTER_TO_INT(gsi));
1770 }
1771 
1772 void kvm_irqchip_set_qemuirq_gsi(KVMState *s, qemu_irq irq, int gsi)
1773 {
1774     g_hash_table_insert(s->gsimap, irq, GINT_TO_POINTER(gsi));
1775 }
1776 
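/*
 * Create the in-kernel irqchip if the host offers one (KVM_CAP_IRQCHIP,
 * or KVM_CAP_S390_IRQCHIP enabled on demand).  An arch-specific hook may
 * create it; otherwise fall back to KVM_CREATE_IRQCHIP.  On success this
 * enables asynchronous interrupt delivery, in-kernel halting and GSI
 * routing, and sets up the qemu_irq -> GSI map.
 */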
1777 static void kvm_irqchip_create(KVMState *s)
1778 {
1779     int ret;
1780 
1781     if (kvm_check_extension(s, KVM_CAP_IRQCHIP)) {
1782         ;
1783     } else if (kvm_check_extension(s, KVM_CAP_S390_IRQCHIP)) {
1784         ret = kvm_vm_enable_cap(s, KVM_CAP_S390_IRQCHIP, 0);
1785         if (ret < 0) {
1786             fprintf(stderr, "Enable kernel irqchip failed: %s\n", strerror(-ret));
1787             exit(1);
1788         }
1789     } else {
1790         return;
1791     }
1792 
1793     /* First probe and see if there's an arch-specific hook to create the
1794      * in-kernel irqchip for us */
1795     ret = kvm_arch_irqchip_create(s);
1796     if (ret == 0) {
1797         if (kvm_kernel_irqchip_split()) {
1798             fprintf(stderr, "Split IRQ chip mode not supported.\n");
1799             exit(1);
1800         } else {
1801             ret = kvm_vm_ioctl(s, KVM_CREATE_IRQCHIP);
1802         }
1803     }
1804     if (ret < 0) {
1805         fprintf(stderr, "Create kernel irqchip failed: %s\n", strerror(-ret));
1806         exit(1);
1807     }
1808 
1809     kvm_kernel_irqchip = true;
1810     /* If we have an in-kernel IRQ chip then we must have asynchronous
1811      * interrupt delivery (though the reverse is not necessarily true)
1812      */
1813     kvm_async_interrupts_allowed = true;
1814     kvm_halt_in_kernel_allowed = true;
1815 
1816     kvm_init_irq_routing(s);
1817 
1818     s->gsimap = g_hash_table_new(g_direct_hash, g_direct_equal);
1819 }
1820 
1821 /* Find number of supported CPUs using the recommended
1822  * procedure from the kernel API documentation to cope with
1823  * older kernels that may be missing capabilities.
1824  */
1825 static int kvm_recommended_vcpus(KVMState *s)
1826 {
1827     int ret = kvm_vm_check_extension(s, KVM_CAP_NR_VCPUS);
1828     return (ret) ? ret : 4;
1829 }
1830 
1831 static int kvm_max_vcpus(KVMState *s)
1832 {
1833     int ret = kvm_check_extension(s, KVM_CAP_MAX_VCPUS);
1834     return (ret) ? ret : kvm_recommended_vcpus(s);
1835 }
1836 
1837 static int kvm_max_vcpu_id(KVMState *s)
1838 {
1839     int ret = kvm_check_extension(s, KVM_CAP_MAX_VCPU_ID);
1840     return (ret) ? ret : kvm_max_vcpus(s);
1841 }
1842 
1843 bool kvm_vcpu_id_is_valid(int vcpu_id)
1844 {
1845     KVMState *s = KVM_STATE(current_machine->accelerator);
1846     return vcpu_id >= 0 && vcpu_id < kvm_max_vcpu_id(s);
1847 }
1848 
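/*
 * Accelerator init entry point: open /dev/kvm, validate the API version,
 * create the VM (retrying on EINTR), sanity-check the requested vCPU
 * counts against the KVM limits, verify required capabilities, probe
 * optional features, and register the memory listeners that mirror guest
 * memory into KVM memslots.  Returns 0 on success or a negative errno.
 */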
1849 static int kvm_init(MachineState *ms)
1850 {
1851     MachineClass *mc = MACHINE_GET_CLASS(ms);
1852     static const char upgrade_note[] =
1853         "Please upgrade to at least kernel 2.6.29 or recent kvm-kmod\n"
1854         "(see http://sourceforge.net/projects/kvm).\n";
1855     struct {
1856         const char *name;
1857         int num;
1858     } num_cpus[] = {
1859         { "SMP",          ms->smp.cpus },
1860         { "hotpluggable", ms->smp.max_cpus },
1861         { NULL, }
1862     }, *nc = num_cpus;
1863     int soft_vcpus_limit, hard_vcpus_limit;
1864     KVMState *s;
1865     const KVMCapabilityInfo *missing_cap;
1866     int ret;
1867     int type = 0;
1868     const char *kvm_type;
1869 
1870     s = KVM_STATE(ms->accelerator);
1871 
1872     /*
1873      * On systems where the kernel can support different base page
1874      * sizes, host page size may be different from TARGET_PAGE_SIZE,
1875      * even with KVM.  TARGET_PAGE_SIZE is assumed to be the minimum
1876      * page size for the system though.
1877      */
1878     assert(TARGET_PAGE_SIZE <= qemu_real_host_page_size);
1879 
1880     s->sigmask_len = 8;
1881 
1882 #ifdef KVM_CAP_SET_GUEST_DEBUG
1883     QTAILQ_INIT(&s->kvm_sw_breakpoints);
1884 #endif
1885     QLIST_INIT(&s->kvm_parked_vcpus);
1886     s->vmfd = -1;
1887     s->fd = qemu_open("/dev/kvm", O_RDWR);
1888     if (s->fd == -1) {
1889         fprintf(stderr, "Could not access KVM kernel module: %m\n");
1890         ret = -errno;
1891         goto err;
1892     }
1893 
1894     ret = kvm_ioctl(s, KVM_GET_API_VERSION, 0);
1895     if (ret < KVM_API_VERSION) {
1896         if (ret >= 0) {
1897             ret = -EINVAL;
1898         }
1899         fprintf(stderr, "kvm version too old\n");
1900         goto err;
1901     }
1902 
1903     if (ret > KVM_API_VERSION) {
1904         ret = -EINVAL;
1905         fprintf(stderr, "kvm version not supported\n");
1906         goto err;
1907     }
1908 
1909     kvm_immediate_exit = kvm_check_extension(s, KVM_CAP_IMMEDIATE_EXIT);
1910     s->nr_slots = kvm_check_extension(s, KVM_CAP_NR_MEMSLOTS);
1911 
1912     /* If unspecified, use the default value */
1913     if (!s->nr_slots) {
1914         s->nr_slots = 32;
1915     }
1916 
1917     s->nr_as = kvm_check_extension(s, KVM_CAP_MULTI_ADDRESS_SPACE);
1918     if (s->nr_as <= 1) {
1919         s->nr_as = 1;
1920     }
1921     s->as = g_new0(struct KVMAs, s->nr_as);
1922 
1923     kvm_type = qemu_opt_get(qemu_get_machine_opts(), "kvm-type");
1924     if (mc->kvm_type) {
1925         type = mc->kvm_type(ms, kvm_type);
1926     } else if (kvm_type) {
1927         ret = -EINVAL;
1928         fprintf(stderr, "Invalid argument kvm-type=%s\n", kvm_type);
1929         goto err;
1930     }
1931 
1932     do {
1933         ret = kvm_ioctl(s, KVM_CREATE_VM, type);
1934     } while (ret == -EINTR);
1935 
1936     if (ret < 0) {
1937         fprintf(stderr, "ioctl(KVM_CREATE_VM) failed: %d %s\n", -ret,
1938                 strerror(-ret));
1939 
1940 #ifdef TARGET_S390X
1941         if (ret == -EINVAL) {
1942             fprintf(stderr,
1943                     "Host kernel setup problem detected. Please verify:\n");
1944             fprintf(stderr, "- for kernels supporting the switch_amode or"
1945                     " user_mode parameters, whether\n");
1946             fprintf(stderr,
1947                     "  user space is running in primary address space\n");
1948             fprintf(stderr,
1949                     "- for kernels supporting the vm.allocate_pgste sysctl, "
1950                     "whether it is enabled\n");
1951         }
1952 #endif
1953         goto err;
1954     }
1955 
1956     s->vmfd = ret;
1957 
1958     /* check the vcpu limits */
1959     soft_vcpus_limit = kvm_recommended_vcpus(s);
1960     hard_vcpus_limit = kvm_max_vcpus(s);
1961 
1962     while (nc->name) {
1963         if (nc->num > soft_vcpus_limit) {
1964             warn_report("Number of %s cpus requested (%d) exceeds "
1965                         "the recommended cpus supported by KVM (%d)",
1966                         nc->name, nc->num, soft_vcpus_limit);
1967 
1968             if (nc->num > hard_vcpus_limit) {
1969                 fprintf(stderr, "Number of %s cpus requested (%d) exceeds "
1970                         "the maximum cpus supported by KVM (%d)\n",
1971                         nc->name, nc->num, hard_vcpus_limit);
1972                 exit(1);
1973             }
1974         }
1975         nc++;
1976     }
1977 
1978     missing_cap = kvm_check_extension_list(s, kvm_required_capabilites);
1979     if (!missing_cap) {
1980         missing_cap =
1981             kvm_check_extension_list(s, kvm_arch_required_capabilities);
1982     }
1983     if (missing_cap) {
1984         ret = -EINVAL;
1985         fprintf(stderr, "kvm does not support %s\n%s",
1986                 missing_cap->name, upgrade_note);
1987         goto err;
1988     }
1989 
1990     s->coalesced_mmio = kvm_check_extension(s, KVM_CAP_COALESCED_MMIO);
1991     s->coalesced_pio = s->coalesced_mmio &&
1992                        kvm_check_extension(s, KVM_CAP_COALESCED_PIO);
1993 
1994     s->manual_dirty_log_protect =
1995         kvm_check_extension(s, KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2);
1996     if (s->manual_dirty_log_protect) {
1997         ret = kvm_vm_enable_cap(s, KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2, 0, 1);
1998         if (ret) {
1999             warn_report("Trying to enable KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 "
2000                         "but failed.  Falling back to the legacy mode. ");
2001             s->manual_dirty_log_protect = false;
2002         }
2003     }
2004 
2005 #ifdef KVM_CAP_VCPU_EVENTS
2006     s->vcpu_events = kvm_check_extension(s, KVM_CAP_VCPU_EVENTS);
2007 #endif
2008 
2009     s->robust_singlestep =
2010         kvm_check_extension(s, KVM_CAP_X86_ROBUST_SINGLESTEP);
2011 
2012 #ifdef KVM_CAP_DEBUGREGS
2013     s->debugregs = kvm_check_extension(s, KVM_CAP_DEBUGREGS);
2014 #endif
2015 
2016     s->max_nested_state_len = kvm_check_extension(s, KVM_CAP_NESTED_STATE);
2017 
2018 #ifdef KVM_CAP_IRQ_ROUTING
2019     kvm_direct_msi_allowed = (kvm_check_extension(s, KVM_CAP_SIGNAL_MSI) > 0);
2020 #endif
2021 
2022     s->intx_set_mask = kvm_check_extension(s, KVM_CAP_PCI_2_3);
2023 
2024     s->irq_set_ioctl = KVM_IRQ_LINE;
2025     if (kvm_check_extension(s, KVM_CAP_IRQ_INJECT_STATUS)) {
2026         s->irq_set_ioctl = KVM_IRQ_LINE_STATUS;
2027     }
2028 
2029     kvm_readonly_mem_allowed =
2030         (kvm_check_extension(s, KVM_CAP_READONLY_MEM) > 0);
2031 
2032     kvm_eventfds_allowed =
2033         (kvm_check_extension(s, KVM_CAP_IOEVENTFD) > 0);
2034 
2035     kvm_irqfds_allowed =
2036         (kvm_check_extension(s, KVM_CAP_IRQFD) > 0);
2037 
2038     kvm_resamplefds_allowed =
2039         (kvm_check_extension(s, KVM_CAP_IRQFD_RESAMPLE) > 0);
2040 
2041     kvm_vm_attributes_allowed =
2042         (kvm_check_extension(s, KVM_CAP_VM_ATTRIBUTES) > 0);
2043 
2044     kvm_ioeventfd_any_length_allowed =
2045         (kvm_check_extension(s, KVM_CAP_IOEVENTFD_ANY_LENGTH) > 0);
2046 
2047     kvm_state = s;
2048 
2049     /*
2050      * If a memory encryption object is specified, initialize the memory
2051      * encryption context.
2052      */
2053     if (ms->memory_encryption) {
2054         kvm_state->memcrypt_handle = sev_guest_init(ms->memory_encryption);
2055         if (!kvm_state->memcrypt_handle) {
2056             ret = -1;
2057             goto err;
2058         }
2059 
2060         kvm_state->memcrypt_encrypt_data = sev_encrypt_data;
2061     }
2062 
2063     ret = kvm_arch_init(ms, s);
2064     if (ret < 0) {
2065         goto err;
2066     }
2067 
2068     if (machine_kernel_irqchip_allowed(ms)) {
2069         kvm_irqchip_create(s);
2070     }
2071 
2072     if (kvm_eventfds_allowed) {
2073         s->memory_listener.listener.eventfd_add = kvm_mem_ioeventfd_add;
2074         s->memory_listener.listener.eventfd_del = kvm_mem_ioeventfd_del;
2075     }
2076     s->memory_listener.listener.coalesced_io_add = kvm_coalesce_mmio_region;
2077     s->memory_listener.listener.coalesced_io_del = kvm_uncoalesce_mmio_region;
2078 
2079     kvm_memory_listener_register(s, &s->memory_listener,
2080                                  &address_space_memory, 0);
2081     memory_listener_register(&kvm_io_listener,
2082                              &address_space_io);
2083     memory_listener_register(&kvm_coalesced_pio_listener,
2084                              &address_space_io);
2085 
2086     s->many_ioeventfds = kvm_check_many_ioeventfds();
2087 
2088     s->sync_mmu = !!kvm_vm_check_extension(kvm_state, KVM_CAP_SYNC_MMU);
2089     if (!s->sync_mmu) {
2090         qemu_balloon_inhibit(true);
2091     }
2092 
2093     return 0;
2094 
2095 err:
2096     assert(ret < 0);
2097     if (s->vmfd >= 0) {
2098         close(s->vmfd);
2099     }
2100     if (s->fd != -1) {
2101         close(s->fd);
2102     }
2103     g_free(s->memory_listener.slots);
2104 
2105     return ret;
2106 }
2107 
2108 void kvm_set_sigmask_len(KVMState *s, unsigned int sigmask_len)
2109 {
2110     s->sigmask_len = sigmask_len;
2111 }
2112 
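/* Replay a KVM_EXIT_IO request as 'count' accesses of 'size' bytes against
 * the I/O address space; 'data' points into the shared kvm_run page. */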
2113 static void kvm_handle_io(uint16_t port, MemTxAttrs attrs, void *data, int direction,
2114                           int size, uint32_t count)
2115 {
2116     int i;
2117     uint8_t *ptr = data;
2118 
2119     for (i = 0; i < count; i++) {
2120         address_space_rw(&address_space_io, port, attrs,
2121                          ptr, size,
2122                          direction == KVM_EXIT_IO_OUT);
2123         ptr += size;
2124     }
2125 }
2126 
2127 static int kvm_handle_internal_error(CPUState *cpu, struct kvm_run *run)
2128 {
2129     fprintf(stderr, "KVM internal error. Suberror: %d\n",
2130             run->internal.suberror);
2131 
2132     if (kvm_check_extension(kvm_state, KVM_CAP_INTERNAL_ERROR_DATA)) {
2133         int i;
2134 
2135         for (i = 0; i < run->internal.ndata; ++i) {
2136             fprintf(stderr, "extra data[%d]: %"PRIx64"\n",
2137                     i, (uint64_t)run->internal.data[i]);
2138         }
2139     }
2140     if (run->internal.suberror == KVM_INTERNAL_ERROR_EMULATION) {
2141         fprintf(stderr, "emulation failure\n");
2142         if (!kvm_arch_stop_on_emulation_error(cpu)) {
2143             cpu_dump_state(cpu, stderr, CPU_DUMP_CODE);
2144             return EXCP_INTERRUPT;
2145         }
2146     }
2147     /* FIXME: Should trigger a QMP message to let management know
2148      * something went wrong.
2149      */
2150     return -1;
2151 }
2152 
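/*
 * Drain the coalesced MMIO/PIO ring shared with the kernel, replaying each
 * deferred write into the corresponding address space.  The in-progress
 * flag guards against recursion when the replayed writes themselves
 * trigger another flush.
 */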
2153 void kvm_flush_coalesced_mmio_buffer(void)
2154 {
2155     KVMState *s = kvm_state;
2156 
2157     if (s->coalesced_flush_in_progress) {
2158         return;
2159     }
2160 
2161     s->coalesced_flush_in_progress = true;
2162 
2163     if (s->coalesced_mmio_ring) {
2164         struct kvm_coalesced_mmio_ring *ring = s->coalesced_mmio_ring;
2165         while (ring->first != ring->last) {
2166             struct kvm_coalesced_mmio *ent;
2167 
2168             ent = &ring->coalesced_mmio[ring->first];
2169 
2170             if (ent->pio == 1) {
2171                 address_space_rw(&address_space_io, ent->phys_addr,
2172                                  MEMTXATTRS_UNSPECIFIED, ent->data,
2173                                  ent->len, true);
2174             } else {
2175                 cpu_physical_memory_write(ent->phys_addr, ent->data, ent->len);
2176             }
2177             smp_wmb();
2178             ring->first = (ring->first + 1) % KVM_COALESCED_MMIO_MAX;
2179         }
2180     }
2181 
2182     s->coalesced_flush_in_progress = false;
2183 }
2184 
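/*
 * Register synchronization: guest registers are fetched from KVM lazily
 * and cached in QEMU; cpu->vcpu_dirty marks the cached copy as the
 * authoritative one, to be written back to KVM before the next KVM_RUN.
 */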
2185 static void do_kvm_cpu_synchronize_state(CPUState *cpu, run_on_cpu_data arg)
2186 {
2187     if (!cpu->vcpu_dirty) {
2188         kvm_arch_get_registers(cpu);
2189         cpu->vcpu_dirty = true;
2190     }
2191 }
2192 
2193 void kvm_cpu_synchronize_state(CPUState *cpu)
2194 {
2195     if (!cpu->vcpu_dirty) {
2196         run_on_cpu(cpu, do_kvm_cpu_synchronize_state, RUN_ON_CPU_NULL);
2197     }
2198 }
2199 
2200 static void do_kvm_cpu_synchronize_post_reset(CPUState *cpu, run_on_cpu_data arg)
2201 {
2202     kvm_arch_put_registers(cpu, KVM_PUT_RESET_STATE);
2203     cpu->vcpu_dirty = false;
2204 }
2205 
2206 void kvm_cpu_synchronize_post_reset(CPUState *cpu)
2207 {
2208     run_on_cpu(cpu, do_kvm_cpu_synchronize_post_reset, RUN_ON_CPU_NULL);
2209 }
2210 
2211 static void do_kvm_cpu_synchronize_post_init(CPUState *cpu, run_on_cpu_data arg)
2212 {
2213     kvm_arch_put_registers(cpu, KVM_PUT_FULL_STATE);
2214     cpu->vcpu_dirty = false;
2215 }
2216 
2217 void kvm_cpu_synchronize_post_init(CPUState *cpu)
2218 {
2219     run_on_cpu(cpu, do_kvm_cpu_synchronize_post_init, RUN_ON_CPU_NULL);
2220 }
2221 
2222 static void do_kvm_cpu_synchronize_pre_loadvm(CPUState *cpu, run_on_cpu_data arg)
2223 {
2224     cpu->vcpu_dirty = true;
2225 }
2226 
2227 void kvm_cpu_synchronize_pre_loadvm(CPUState *cpu)
2228 {
2229     run_on_cpu(cpu, do_kvm_cpu_synchronize_pre_loadvm, RUN_ON_CPU_NULL);
2230 }
2231 
2232 #ifdef KVM_HAVE_MCE_INJECTION
2233 static __thread void *pending_sigbus_addr;
2234 static __thread int pending_sigbus_code;
2235 static __thread bool have_sigbus_pending;
2236 #endif
2237 
2238 static void kvm_cpu_kick(CPUState *cpu)
2239 {
2240     atomic_set(&cpu->kvm_run->immediate_exit, 1);
2241 }
2242 
2243 static void kvm_cpu_kick_self(void)
2244 {
2245     if (kvm_immediate_exit) {
2246         kvm_cpu_kick(current_cpu);
2247     } else {
2248         qemu_cpu_kick_self();
2249     }
2250 }
2251 
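/*
 * Consume any pending SIG_IPI so a kick is not left pending across the
 * next KVM_RUN.  With KVM_CAP_IMMEDIATE_EXIT there is no signal to eat;
 * just clear immediate_exit and rely on the barrier pairing with
 * kvm_cpu_exec.
 */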
2252 static void kvm_eat_signals(CPUState *cpu)
2253 {
2254     struct timespec ts = { 0, 0 };
2255     siginfo_t siginfo;
2256     sigset_t waitset;
2257     sigset_t chkset;
2258     int r;
2259 
2260     if (kvm_immediate_exit) {
2261         atomic_set(&cpu->kvm_run->immediate_exit, 0);
2262         /* Write kvm_run->immediate_exit before the cpu->exit_request
2263          * write in kvm_cpu_exec.
2264          */
2265         smp_wmb();
2266         return;
2267     }
2268 
2269     sigemptyset(&waitset);
2270     sigaddset(&waitset, SIG_IPI);
2271 
2272     do {
2273         r = sigtimedwait(&waitset, &siginfo, &ts);
2274         if (r == -1 && !(errno == EAGAIN || errno == EINTR)) {
2275             perror("sigtimedwait");
2276             exit(1);
2277         }
2278 
2279         r = sigpending(&chkset);
2280         if (r == -1) {
2281             perror("sigpending");
2282             exit(1);
2283         }
2284     } while (sigismember(&chkset, SIG_IPI));
2285 }
2286 
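/*
 * Main vCPU run loop: flush dirty registers, enter KVM_RUN outside the
 * BQL, then dispatch on run->exit_reason until something other than 0 is
 * produced (EXCP_INTERRUPT, EXCP_HLT, or -1 on error).
 */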
2287 int kvm_cpu_exec(CPUState *cpu)
2288 {
2289     struct kvm_run *run = cpu->kvm_run;
2290     int ret, run_ret;
2291 
2292     DPRINTF("kvm_cpu_exec()\n");
2293 
2294     if (kvm_arch_process_async_events(cpu)) {
2295         atomic_set(&cpu->exit_request, 0);
2296         return EXCP_HLT;
2297     }
2298 
2299     qemu_mutex_unlock_iothread();
2300     cpu_exec_start(cpu);
2301 
2302     do {
2303         MemTxAttrs attrs;
2304 
2305         if (cpu->vcpu_dirty) {
2306             kvm_arch_put_registers(cpu, KVM_PUT_RUNTIME_STATE);
2307             cpu->vcpu_dirty = false;
2308         }
2309 
2310         kvm_arch_pre_run(cpu, run);
2311         if (atomic_read(&cpu->exit_request)) {
2312             DPRINTF("interrupt exit requested\n");
2313             /*
2314              * KVM requires us to reenter the kernel after IO exits to complete
2315              * instruction emulation. This self-signal will ensure that we
2316              * leave ASAP again.
2317              */
2318             kvm_cpu_kick_self();
2319         }
2320 
2321         /* Read cpu->exit_request before KVM_RUN reads run->immediate_exit.
2322          * Matching barrier in kvm_eat_signals.
2323          */
2324         smp_rmb();
2325 
2326         run_ret = kvm_vcpu_ioctl(cpu, KVM_RUN, 0);
2327 
2328         attrs = kvm_arch_post_run(cpu, run);
2329 
2330 #ifdef KVM_HAVE_MCE_INJECTION
2331         if (unlikely(have_sigbus_pending)) {
2332             qemu_mutex_lock_iothread();
2333             kvm_arch_on_sigbus_vcpu(cpu, pending_sigbus_code,
2334                                     pending_sigbus_addr);
2335             have_sigbus_pending = false;
2336             qemu_mutex_unlock_iothread();
2337         }
2338 #endif
2339 
2340         if (run_ret < 0) {
2341             if (run_ret == -EINTR || run_ret == -EAGAIN) {
2342                 DPRINTF("io window exit\n");
2343                 kvm_eat_signals(cpu);
2344                 ret = EXCP_INTERRUPT;
2345                 break;
2346             }
2347             fprintf(stderr, "error: kvm run failed %s\n",
2348                     strerror(-run_ret));
2349 #ifdef TARGET_PPC
2350             if (run_ret == -EBUSY) {
2351                 fprintf(stderr,
2352                         "This is probably because your SMT is enabled.\n"
2353                         "VCPU can only run on primary threads with all "
2354                         "secondary threads offline.\n");
2355             }
2356 #endif
2357             ret = -1;
2358             break;
2359         }
2360 
2361         trace_kvm_run_exit(cpu->cpu_index, run->exit_reason);
2362         switch (run->exit_reason) {
2363         case KVM_EXIT_IO:
2364             DPRINTF("handle_io\n");
2365             /* Called outside BQL */
2366             kvm_handle_io(run->io.port, attrs,
2367                           (uint8_t *)run + run->io.data_offset,
2368                           run->io.direction,
2369                           run->io.size,
2370                           run->io.count);
2371             ret = 0;
2372             break;
2373         case KVM_EXIT_MMIO:
2374             DPRINTF("handle_mmio\n");
2375             /* Called outside BQL */
2376             address_space_rw(&address_space_memory,
2377                              run->mmio.phys_addr, attrs,
2378                              run->mmio.data,
2379                              run->mmio.len,
2380                              run->mmio.is_write);
2381             ret = 0;
2382             break;
2383         case KVM_EXIT_IRQ_WINDOW_OPEN:
2384             DPRINTF("irq_window_open\n");
2385             ret = EXCP_INTERRUPT;
2386             break;
2387         case KVM_EXIT_SHUTDOWN:
2388             DPRINTF("shutdown\n");
2389             qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
2390             ret = EXCP_INTERRUPT;
2391             break;
2392         case KVM_EXIT_UNKNOWN:
2393             fprintf(stderr, "KVM: unknown exit, hardware reason %" PRIx64 "\n",
2394                     (uint64_t)run->hw.hardware_exit_reason);
2395             ret = -1;
2396             break;
2397         case KVM_EXIT_INTERNAL_ERROR:
2398             ret = kvm_handle_internal_error(cpu, run);
2399             break;
2400         case KVM_EXIT_SYSTEM_EVENT:
2401             switch (run->system_event.type) {
2402             case KVM_SYSTEM_EVENT_SHUTDOWN:
2403                 qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN);
2404                 ret = EXCP_INTERRUPT;
2405                 break;
2406             case KVM_SYSTEM_EVENT_RESET:
2407                 qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
2408                 ret = EXCP_INTERRUPT;
2409                 break;
2410             case KVM_SYSTEM_EVENT_CRASH:
2411                 kvm_cpu_synchronize_state(cpu);
2412                 qemu_mutex_lock_iothread();
2413                 qemu_system_guest_panicked(cpu_get_crash_info(cpu));
2414                 qemu_mutex_unlock_iothread();
2415                 ret = 0;
2416                 break;
2417             default:
2418                 DPRINTF("kvm_arch_handle_exit\n");
2419                 ret = kvm_arch_handle_exit(cpu, run);
2420                 break;
2421             }
2422             break;
2423         default:
2424             DPRINTF("kvm_arch_handle_exit\n");
2425             ret = kvm_arch_handle_exit(cpu, run);
2426             break;
2427         }
2428     } while (ret == 0);
2429 
2430     cpu_exec_end(cpu);
2431     qemu_mutex_lock_iothread();
2432 
2433     if (ret < 0) {
2434         cpu_dump_state(cpu, stderr, CPU_DUMP_CODE);
2435         vm_stop(RUN_STATE_INTERNAL_ERROR);
2436     }
2437 
2438     atomic_set(&cpu->exit_request, 0);
2439     return ret;
2440 }
2441 
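/*
 * Thin wrappers around ioctl() on the /dev/kvm, VM, vCPU and device file
 * descriptors.  They take at most one pointer argument and convert the
 * -1/errno convention into a negative errno return value, e.g.:
 *
 *     ret = kvm_vm_ioctl(s, KVM_SET_USER_MEMORY_REGION, &mem);
 *     if (ret < 0) { ... }
 */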
2442 int kvm_ioctl(KVMState *s, int type, ...)
2443 {
2444     int ret;
2445     void *arg;
2446     va_list ap;
2447 
2448     va_start(ap, type);
2449     arg = va_arg(ap, void *);
2450     va_end(ap);
2451 
2452     trace_kvm_ioctl(type, arg);
2453     ret = ioctl(s->fd, type, arg);
2454     if (ret == -1) {
2455         ret = -errno;
2456     }
2457     return ret;
2458 }
2459 
2460 int kvm_vm_ioctl(KVMState *s, int type, ...)
2461 {
2462     int ret;
2463     void *arg;
2464     va_list ap;
2465 
2466     va_start(ap, type);
2467     arg = va_arg(ap, void *);
2468     va_end(ap);
2469 
2470     trace_kvm_vm_ioctl(type, arg);
2471     ret = ioctl(s->vmfd, type, arg);
2472     if (ret == -1) {
2473         ret = -errno;
2474     }
2475     return ret;
2476 }
2477 
2478 int kvm_vcpu_ioctl(CPUState *cpu, int type, ...)
2479 {
2480     int ret;
2481     void *arg;
2482     va_list ap;
2483 
2484     va_start(ap, type);
2485     arg = va_arg(ap, void *);
2486     va_end(ap);
2487 
2488     trace_kvm_vcpu_ioctl(cpu->cpu_index, type, arg);
2489     ret = ioctl(cpu->kvm_fd, type, arg);
2490     if (ret == -1) {
2491         ret = -errno;
2492     }
2493     return ret;
2494 }
2495 
2496 int kvm_device_ioctl(int fd, int type, ...)
2497 {
2498     int ret;
2499     void *arg;
2500     va_list ap;
2501 
2502     va_start(ap, type);
2503     arg = va_arg(ap, void *);
2504     va_end(ap);
2505 
2506     trace_kvm_device_ioctl(fd, type, arg);
2507     ret = ioctl(fd, type, arg);
2508     if (ret == -1) {
2509         ret = -errno;
2510     }
2511     return ret;
2512 }
2513 
2514 int kvm_vm_check_attr(KVMState *s, uint32_t group, uint64_t attr)
2515 {
2516     int ret;
2517     struct kvm_device_attr attribute = {
2518         .group = group,
2519         .attr = attr,
2520     };
2521 
2522     if (!kvm_vm_attributes_allowed) {
2523         return 0;
2524     }
2525 
2526     ret = kvm_vm_ioctl(s, KVM_HAS_DEVICE_ATTR, &attribute);
2527     /* kvm returns 0 on success for HAS_DEVICE_ATTR */
2528     return ret ? 0 : 1;
2529 }
2530 
2531 int kvm_device_check_attr(int dev_fd, uint32_t group, uint64_t attr)
2532 {
2533     struct kvm_device_attr attribute = {
2534         .group = group,
2535         .attr = attr,
2536         .flags = 0,
2537     };
2538 
2539     return kvm_device_ioctl(dev_fd, KVM_HAS_DEVICE_ATTR, &attribute) ? 0 : 1;
2540 }
2541 
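/*
 * Read or write a single device attribute (group/attr) through
 * KVM_GET_DEVICE_ATTR / KVM_SET_DEVICE_ATTR on a device fd obtained from
 * kvm_create_device().  'val' is passed by its user-space address; errors
 * are reported through 'errp' as well as the negative return value.
 */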
2542 int kvm_device_access(int fd, int group, uint64_t attr,
2543                       void *val, bool write, Error **errp)
2544 {
2545     struct kvm_device_attr kvmattr;
2546     int err;
2547 
2548     kvmattr.flags = 0;
2549     kvmattr.group = group;
2550     kvmattr.attr = attr;
2551     kvmattr.addr = (uintptr_t)val;
2552 
2553     err = kvm_device_ioctl(fd,
2554                            write ? KVM_SET_DEVICE_ATTR : KVM_GET_DEVICE_ATTR,
2555                            &kvmattr);
2556     if (err < 0) {
2557         error_setg_errno(errp, -err,
2558                          "KVM_%s_DEVICE_ATTR failed: Group %d "
2559                          "attr 0x%016" PRIx64,
2560                          write ? "SET" : "GET", group, attr);
2561     }
2562     return err;
2563 }
2564 
2565 bool kvm_has_sync_mmu(void)
2566 {
2567     return kvm_state->sync_mmu;
2568 }
2569 
2570 int kvm_has_vcpu_events(void)
2571 {
2572     return kvm_state->vcpu_events;
2573 }
2574 
2575 int kvm_has_robust_singlestep(void)
2576 {
2577     return kvm_state->robust_singlestep;
2578 }
2579 
2580 int kvm_has_debugregs(void)
2581 {
2582     return kvm_state->debugregs;
2583 }
2584 
2585 int kvm_max_nested_state_length(void)
2586 {
2587     return kvm_state->max_nested_state_len;
2588 }
2589 
2590 int kvm_has_many_ioeventfds(void)
2591 {
2592     if (!kvm_enabled()) {
2593         return 0;
2594     }
2595     return kvm_state->many_ioeventfds;
2596 }
2597 
2598 int kvm_has_gsi_routing(void)
2599 {
2600 #ifdef KVM_CAP_IRQ_ROUTING
2601     return kvm_check_extension(kvm_state, KVM_CAP_IRQ_ROUTING);
2602 #else
2603     return false;
2604 #endif
2605 }
2606 
2607 int kvm_has_intx_set_mask(void)
2608 {
2609     return kvm_state->intx_set_mask;
2610 }
2611 
2612 bool kvm_arm_supports_user_irq(void)
2613 {
2614     return kvm_check_extension(kvm_state, KVM_CAP_ARM_USER_IRQ);
2615 }
2616 
2617 #ifdef KVM_CAP_SET_GUEST_DEBUG
2618 struct kvm_sw_breakpoint *kvm_find_sw_breakpoint(CPUState *cpu,
2619                                                  target_ulong pc)
2620 {
2621     struct kvm_sw_breakpoint *bp;
2622 
2623     QTAILQ_FOREACH(bp, &cpu->kvm_state->kvm_sw_breakpoints, entry) {
2624         if (bp->pc == pc) {
2625             return bp;
2626         }
2627     }
2628     return NULL;
2629 }
2630 
2631 int kvm_sw_breakpoints_active(CPUState *cpu)
2632 {
2633     return !QTAILQ_EMPTY(&cpu->kvm_state->kvm_sw_breakpoints);
2634 }
2635 
2636 struct kvm_set_guest_debug_data {
2637     struct kvm_guest_debug dbg;
2638     int err;
2639 };
2640 
2641 static void kvm_invoke_set_guest_debug(CPUState *cpu, run_on_cpu_data data)
2642 {
2643     struct kvm_set_guest_debug_data *dbg_data =
2644         (struct kvm_set_guest_debug_data *) data.host_ptr;
2645 
2646     dbg_data->err = kvm_vcpu_ioctl(cpu, KVM_SET_GUEST_DEBUG,
2647                                    &dbg_data->dbg);
2648 }
2649 
2650 int kvm_update_guest_debug(CPUState *cpu, unsigned long reinject_trap)
2651 {
2652     struct kvm_set_guest_debug_data data;
2653 
2654     data.dbg.control = reinject_trap;
2655 
2656     if (cpu->singlestep_enabled) {
2657         data.dbg.control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP;
2658     }
2659     kvm_arch_update_guest_debug(cpu, &data.dbg);
2660 
2661     run_on_cpu(cpu, kvm_invoke_set_guest_debug,
2662                RUN_ON_CPU_HOST_PTR(&data));
2663     return data.err;
2664 }
2665 
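/*
 * Insert a gdbstub breakpoint.  Software breakpoints are reference-counted
 * per PC and installed by the arch hook (typically by patching a
 * breakpoint instruction into guest memory); hardware breakpoints go
 * straight to kvm_arch_insert_hw_breakpoint().  Either way, every vCPU's
 * debug state is refreshed afterwards.
 */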
2666 int kvm_insert_breakpoint(CPUState *cpu, target_ulong addr,
2667                           target_ulong len, int type)
2668 {
2669     struct kvm_sw_breakpoint *bp;
2670     int err;
2671 
2672     if (type == GDB_BREAKPOINT_SW) {
2673         bp = kvm_find_sw_breakpoint(cpu, addr);
2674         if (bp) {
2675             bp->use_count++;
2676             return 0;
2677         }
2678 
2679         bp = g_malloc(sizeof(struct kvm_sw_breakpoint));
2680         bp->pc = addr;
2681         bp->use_count = 1;
2682         err = kvm_arch_insert_sw_breakpoint(cpu, bp);
2683         if (err) {
2684             g_free(bp);
2685             return err;
2686         }
2687 
2688         QTAILQ_INSERT_HEAD(&cpu->kvm_state->kvm_sw_breakpoints, bp, entry);
2689     } else {
2690         err = kvm_arch_insert_hw_breakpoint(addr, len, type);
2691         if (err) {
2692             return err;
2693         }
2694     }
2695 
2696     CPU_FOREACH(cpu) {
2697         err = kvm_update_guest_debug(cpu, 0);
2698         if (err) {
2699             return err;
2700         }
2701     }
2702     return 0;
2703 }
2704 
2705 int kvm_remove_breakpoint(CPUState *cpu, target_ulong addr,
2706                           target_ulong len, int type)
2707 {
2708     struct kvm_sw_breakpoint *bp;
2709     int err;
2710 
2711     if (type == GDB_BREAKPOINT_SW) {
2712         bp = kvm_find_sw_breakpoint(cpu, addr);
2713         if (!bp) {
2714             return -ENOENT;
2715         }
2716 
2717         if (bp->use_count > 1) {
2718             bp->use_count--;
2719             return 0;
2720         }
2721 
2722         err = kvm_arch_remove_sw_breakpoint(cpu, bp);
2723         if (err) {
2724             return err;
2725         }
2726 
2727         QTAILQ_REMOVE(&cpu->kvm_state->kvm_sw_breakpoints, bp, entry);
2728         g_free(bp);
2729     } else {
2730         err = kvm_arch_remove_hw_breakpoint(addr, len, type);
2731         if (err) {
2732             return err;
2733         }
2734     }
2735 
2736     CPU_FOREACH(cpu) {
2737         err = kvm_update_guest_debug(cpu, 0);
2738         if (err) {
2739             return err;
2740         }
2741     }
2742     return 0;
2743 }
2744 
2745 void kvm_remove_all_breakpoints(CPUState *cpu)
2746 {
2747     struct kvm_sw_breakpoint *bp, *next;
2748     KVMState *s = cpu->kvm_state;
2749     CPUState *tmpcpu;
2750 
2751     QTAILQ_FOREACH_SAFE(bp, &s->kvm_sw_breakpoints, entry, next) {
2752         if (kvm_arch_remove_sw_breakpoint(cpu, bp) != 0) {
2753             /* Try harder to find a CPU that currently sees the breakpoint. */
2754             CPU_FOREACH(tmpcpu) {
2755                 if (kvm_arch_remove_sw_breakpoint(tmpcpu, bp) == 0) {
2756                     break;
2757                 }
2758             }
2759         }
2760         QTAILQ_REMOVE(&s->kvm_sw_breakpoints, bp, entry);
2761         g_free(bp);
2762     }
2763     kvm_arch_remove_all_hw_breakpoints();
2764 
2765     CPU_FOREACH(cpu) {
2766         kvm_update_guest_debug(cpu, 0);
2767     }
2768 }
2769 
2770 #else /* !KVM_CAP_SET_GUEST_DEBUG */
2771 
2772 int kvm_update_guest_debug(CPUState *cpu, unsigned long reinject_trap)
2773 {
2774     return -EINVAL;
2775 }
2776 
2777 int kvm_insert_breakpoint(CPUState *cpu, target_ulong addr,
2778                           target_ulong len, int type)
2779 {
2780     return -EINVAL;
2781 }
2782 
2783 int kvm_remove_breakpoint(CPUState *cpu, target_ulong addr,
2784                           target_ulong len, int type)
2785 {
2786     return -EINVAL;
2787 }
2788 
2789 void kvm_remove_all_breakpoints(CPUState *cpu)
2790 {
2791 }
2792 #endif /* !KVM_CAP_SET_GUEST_DEBUG */
2793 
2794 static int kvm_set_signal_mask(CPUState *cpu, const sigset_t *sigset)
2795 {
2796     KVMState *s = kvm_state;
2797     struct kvm_signal_mask *sigmask;
2798     int r;
2799 
2800     sigmask = g_malloc(sizeof(*sigmask) + sizeof(*sigset));
2801 
2802     sigmask->len = s->sigmask_len;
2803     memcpy(sigmask->sigset, sigset, sizeof(*sigset));
2804     r = kvm_vcpu_ioctl(cpu, KVM_SET_SIGNAL_MASK, sigmask);
2805     g_free(sigmask);
2806 
2807     return r;
2808 }
2809 
2810 static void kvm_ipi_signal(int sig)
2811 {
2812     if (current_cpu) {
2813         assert(kvm_immediate_exit);
2814         kvm_cpu_kick(current_cpu);
2815     }
2816 }
2817 
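/*
 * Per-vCPU signal setup: install the SIG_IPI handler and unblock SIG_IPI
 * (and SIGBUS where MCE injection is supported).  Without
 * KVM_CAP_IMMEDIATE_EXIT the mask is instead handed to the kernel via
 * KVM_SET_SIGNAL_MASK, so SIG_IPI is only delivered while the vCPU is
 * inside KVM_RUN.
 */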
2818 void kvm_init_cpu_signals(CPUState *cpu)
2819 {
2820     int r;
2821     sigset_t set;
2822     struct sigaction sigact;
2823 
2824     memset(&sigact, 0, sizeof(sigact));
2825     sigact.sa_handler = kvm_ipi_signal;
2826     sigaction(SIG_IPI, &sigact, NULL);
2827 
2828     pthread_sigmask(SIG_BLOCK, NULL, &set);
2829 #if defined KVM_HAVE_MCE_INJECTION
2830     sigdelset(&set, SIGBUS);
2831     pthread_sigmask(SIG_SETMASK, &set, NULL);
2832 #endif
2833     sigdelset(&set, SIG_IPI);
2834     if (kvm_immediate_exit) {
2835         r = pthread_sigmask(SIG_SETMASK, &set, NULL);
2836     } else {
2837         r = kvm_set_signal_mask(cpu, &set);
2838     }
2839     if (r) {
2840         fprintf(stderr, "kvm_set_signal_mask: %s\n", strerror(-r));
2841         exit(1);
2842     }
2843 }
2844 
2845 /* Called asynchronously in VCPU thread.  */
2846 int kvm_on_sigbus_vcpu(CPUState *cpu, int code, void *addr)
2847 {
2848 #ifdef KVM_HAVE_MCE_INJECTION
2849     if (have_sigbus_pending) {
2850         return 1;
2851     }
2852     have_sigbus_pending = true;
2853     pending_sigbus_addr = addr;
2854     pending_sigbus_code = code;
2855     atomic_set(&cpu->exit_request, 1);
2856     return 0;
2857 #else
2858     return 1;
2859 #endif
2860 }
2861 
2862 /* Called synchronously (via signalfd) in main thread.  */
2863 int kvm_on_sigbus(int code, void *addr)
2864 {
2865 #ifdef KVM_HAVE_MCE_INJECTION
2866     /* An action-required MCE kills the process if SIGBUS is blocked.  Because
2867      * that's what happens in the I/O thread, where we handle MCE via signalfd,
2868      * we can only get action-optional MCEs here.
2869      */
2870     assert(code != BUS_MCEERR_AR);
2871     kvm_arch_on_sigbus_vcpu(first_cpu, code, addr);
2872     return 0;
2873 #else
2874     return 1;
2875 #endif
2876 }
2877 
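/*
 * Create an in-kernel device of the given type via KVM_CREATE_DEVICE.
 * With 'test' set, KVM_CREATE_DEVICE_TEST only probes whether the device
 * type is supported without instantiating it.  Returns the new device fd
 * (or 0 for a test), -ENOTSUP without KVM_CAP_DEVICE_CTRL, or a negative
 * errno.
 */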
2878 int kvm_create_device(KVMState *s, uint64_t type, bool test)
2879 {
2880     int ret;
2881     struct kvm_create_device create_dev;
2882 
2883     create_dev.type = type;
2884     create_dev.fd = -1;
2885     create_dev.flags = test ? KVM_CREATE_DEVICE_TEST : 0;
2886 
2887     if (!kvm_check_extension(s, KVM_CAP_DEVICE_CTRL)) {
2888         return -ENOTSUP;
2889     }
2890 
2891     ret = kvm_vm_ioctl(s, KVM_CREATE_DEVICE, &create_dev);
2892     if (ret) {
2893         return ret;
2894     }
2895 
2896     return test ? 0 : create_dev.fd;
2897 }
2898 
2899 bool kvm_device_supported(int vmfd, uint64_t type)
2900 {
2901     struct kvm_create_device create_dev = {
2902         .type = type,
2903         .fd = -1,
2904         .flags = KVM_CREATE_DEVICE_TEST,
2905     };
2906 
2907     if (ioctl(vmfd, KVM_CHECK_EXTENSION, KVM_CAP_DEVICE_CTRL) <= 0) {
2908         return false;
2909     }
2910 
2911     return (ioctl(vmfd, KVM_CREATE_DEVICE, &create_dev) >= 0);
2912 }
2913 
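/*
 * Accessors for KVM's ONE_REG interface: 'id' identifies the register and
 * its size, while 'source'/'target' is the QEMU-side buffer whose address
 * is passed to the kernel.  Failures are traced rather than fatal, e.g.:
 *
 *     uint64_t val = 0x1234;
 *     kvm_set_one_reg(cs, some_reg_id, &val);
 *
 * where some_reg_id stands for an arch-specific KVM_REG_* identifier.
 */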
2914 int kvm_set_one_reg(CPUState *cs, uint64_t id, void *source)
2915 {
2916     struct kvm_one_reg reg;
2917     int r;
2918 
2919     reg.id = id;
2920     reg.addr = (uintptr_t) source;
2921     r = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
2922     if (r) {
2923         trace_kvm_failed_reg_set(id, strerror(-r));
2924     }
2925     return r;
2926 }
2927 
2928 int kvm_get_one_reg(CPUState *cs, uint64_t id, void *target)
2929 {
2930     struct kvm_one_reg reg;
2931     int r;
2932 
2933     reg.id = id;
2934     reg.addr = (uintptr_t) target;
2935     r = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
2936     if (r) {
2937         trace_kvm_failed_reg_get(id, strerror(-r));
2938     }
2939     return r;
2940 }
2941 
2942 static bool kvm_accel_has_memory(MachineState *ms, AddressSpace *as,
2943                                  hwaddr start_addr, hwaddr size)
2944 {
2945     KVMState *kvm = KVM_STATE(ms->accelerator);
2946     int i;
2947 
2948     for (i = 0; i < kvm->nr_as; ++i) {
2949         if (kvm->as[i].as == as && kvm->as[i].ml) {
2950             size = MIN(kvm_max_slot_size, size);
2951             return NULL != kvm_lookup_matching_slot(kvm->as[i].ml,
2952                                                     start_addr, size);
2953         }
2954     }
2955 
2956     return false;
2957 }
2958 
2959 static void kvm_get_kvm_shadow_mem(Object *obj, Visitor *v,
2960                                    const char *name, void *opaque,
2961                                    Error **errp)
2962 {
2963     KVMState *s = KVM_STATE(obj);
2964     int64_t value = s->kvm_shadow_mem;
2965 
2966     visit_type_int(v, name, &value, errp);
2967 }
2968 
2969 static void kvm_set_kvm_shadow_mem(Object *obj, Visitor *v,
2970                                    const char *name, void *opaque,
2971                                    Error **errp)
2972 {
2973     KVMState *s = KVM_STATE(obj);
2974     Error *error = NULL;
2975     int64_t value;
2976 
2977     visit_type_int(v, name, &value, &error);
2978     if (error) {
2979         error_propagate(errp, error);
2980         return;
2981     }
2982 
2983     s->kvm_shadow_mem = value;
2984 }
2985 
2986 bool kvm_kernel_irqchip_allowed(void)
2987 {
2988     return machine_kernel_irqchip_allowed(current_machine);
2989 }
2990 
2991 bool kvm_kernel_irqchip_required(void)
2992 {
2993     return machine_kernel_irqchip_required(current_machine);
2994 }
2995 
2996 bool kvm_kernel_irqchip_split(void)
2997 {
2998     return machine_kernel_irqchip_split(current_machine);
2999 }
3000 
3001 static void kvm_accel_instance_init(Object *obj)
3002 {
3003     KVMState *s = KVM_STATE(obj);
3004 
3005     s->kvm_shadow_mem = -1;
3006 }
3007 
3008 static void kvm_accel_class_init(ObjectClass *oc, void *data)
3009 {
3010     AccelClass *ac = ACCEL_CLASS(oc);
3011     ac->name = "KVM";
3012     ac->init_machine = kvm_init;
3013     ac->has_memory = kvm_accel_has_memory;
3014     ac->allowed = &kvm_allowed;
3015 
3016     object_class_property_add(oc, "kvm-shadow-mem", "int",
3017         kvm_get_kvm_shadow_mem, kvm_set_kvm_shadow_mem,
3018         NULL, NULL, &error_abort);
3019     object_class_property_set_description(oc, "kvm-shadow-mem",
3020         "KVM shadow MMU size", &error_abort);
3021 }
3022 
3023 static const TypeInfo kvm_accel_type = {
3024     .name = TYPE_KVM_ACCEL,
3025     .parent = TYPE_ACCEL,
3026     .instance_init = kvm_accel_instance_init,
3027     .class_init = kvm_accel_class_init,
3028     .instance_size = sizeof(KVMState),
3029 };
3030 
3031 static void kvm_type_init(void)
3032 {
3033     type_register_static(&kvm_accel_type);
3034 }
3035 
3036 type_init(kvm_type_init);
3037