/*
 * QEMU KVM support
 *
 * Copyright IBM, Corp. 2008
 *           Red Hat, Inc. 2008
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *  Glauber Costa     <gcosta@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include <sys/ioctl.h>
#include <poll.h>

#include <linux/kvm.h>

#include "qemu/atomic.h"
#include "qemu/option.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "qapi/error.h"
#include "hw/pci/msi.h"
#include "hw/pci/msix.h"
#include "hw/s390x/adapter.h"
#include "gdbstub/enums.h"
#include "sysemu/kvm_int.h"
#include "sysemu/runstate.h"
#include "sysemu/cpus.h"
#include "sysemu/accel-blocker.h"
#include "qemu/bswap.h"
#include "exec/memory.h"
#include "exec/ram_addr.h"
#include "qemu/event_notifier.h"
#include "qemu/main-loop.h"
#include "trace.h"
#include "hw/irq.h"
#include "qapi/visitor.h"
#include "qapi/qapi-types-common.h"
#include "qapi/qapi-visit-common.h"
#include "sysemu/reset.h"
#include "qemu/guest-random.h"
#include "sysemu/hw_accel.h"
#include "kvm-cpus.h"
#include "sysemu/dirtylimit.h"
#include "qemu/range.h"

#include "hw/boards.h"
#include "sysemu/stats.h"

/* This check must be after config-host.h is included */
#ifdef CONFIG_EVENTFD
#include <sys/eventfd.h>
#endif

/* KVM uses PAGE_SIZE in its definition of KVM_COALESCED_MMIO_MAX. We
 * need to use the real host PAGE_SIZE, as that's what KVM will use.
 */
#ifdef PAGE_SIZE
#undef PAGE_SIZE
#endif
#define PAGE_SIZE qemu_real_host_page_size()

#ifndef KVM_GUESTDBG_BLOCKIRQ
#define KVM_GUESTDBG_BLOCKIRQ 0
#endif

/* Default num of memslots to be allocated when VM starts */
#define  KVM_MEMSLOTS_NR_ALLOC_DEFAULT                      16
/* Default max allowed memslots if kernel reported nothing */
#define  KVM_MEMSLOTS_NR_MAX_DEFAULT                        32

struct KVMParkedVcpu {
    unsigned long vcpu_id;
    int kvm_fd;
    QLIST_ENTRY(KVMParkedVcpu) node;
};

KVMState *kvm_state;
bool kvm_kernel_irqchip;
bool kvm_split_irqchip;
bool kvm_async_interrupts_allowed;
bool kvm_halt_in_kernel_allowed;
bool kvm_resamplefds_allowed;
bool kvm_msi_via_irqfd_allowed;
bool kvm_gsi_routing_allowed;
bool kvm_gsi_direct_mapping;
bool kvm_allowed;
bool kvm_readonly_mem_allowed;
bool kvm_vm_attributes_allowed;
bool kvm_msi_use_devid;
static bool kvm_has_guest_debug;
static int kvm_sstep_flags;
static bool kvm_immediate_exit;
static uint64_t kvm_supported_memory_attributes;
static bool kvm_guest_memfd_supported;
static hwaddr kvm_max_slot_size = ~0;

static const KVMCapabilityInfo kvm_required_capabilites[] = {
    KVM_CAP_INFO(USER_MEMORY),
    KVM_CAP_INFO(DESTROY_MEMORY_REGION_WORKS),
    KVM_CAP_INFO(JOIN_MEMORY_REGIONS_WORKS),
    KVM_CAP_INFO(INTERNAL_ERROR_DATA),
    KVM_CAP_INFO(IOEVENTFD),
    KVM_CAP_INFO(IOEVENTFD_ANY_LENGTH),
    KVM_CAP_LAST_INFO
};

static NotifierList kvm_irqchip_change_notifiers =
    NOTIFIER_LIST_INITIALIZER(kvm_irqchip_change_notifiers);

struct KVMResampleFd {
    int gsi;
    EventNotifier *resample_event;
    QLIST_ENTRY(KVMResampleFd) node;
};
typedef struct KVMResampleFd KVMResampleFd;

/*
 * Only used with split irqchip where we need to do the resample fd
 * kick for the kernel from userspace.
 */
static QLIST_HEAD(, KVMResampleFd) kvm_resample_fd_list =
    QLIST_HEAD_INITIALIZER(kvm_resample_fd_list);

static QemuMutex kml_slots_lock;

#define kvm_slots_lock()    qemu_mutex_lock(&kml_slots_lock)
#define kvm_slots_unlock()  qemu_mutex_unlock(&kml_slots_lock)

static void kvm_slot_init_dirty_bitmap(KVMSlot *mem);

static inline void kvm_resample_fd_remove(int gsi)
{
    KVMResampleFd *rfd;

    QLIST_FOREACH(rfd, &kvm_resample_fd_list, node) {
        if (rfd->gsi == gsi) {
            QLIST_REMOVE(rfd, node);
            g_free(rfd);
            break;
        }
    }
}

static inline void kvm_resample_fd_insert(int gsi, EventNotifier *event)
{
    KVMResampleFd *rfd = g_new0(KVMResampleFd, 1);

    rfd->gsi = gsi;
    rfd->resample_event = event;

    QLIST_INSERT_HEAD(&kvm_resample_fd_list, rfd, node);
}

void kvm_resample_fd_notify(int gsi)
{
    KVMResampleFd *rfd;

    QLIST_FOREACH(rfd, &kvm_resample_fd_list, node) {
        if (rfd->gsi == gsi) {
            event_notifier_set(rfd->resample_event);
            trace_kvm_resample_fd_notify(gsi);
            return;
        }
    }
}

/**
 * kvm_slots_grow(): Grow the slots[] array in the KVMMemoryListener
 *
 * @kml: The KVMMemoryListener whose slots[] array to grow
 * @nr_slots_new: The new size of the slots[] array
 *
 * Returns: True if the array grows larger, false otherwise.
 */
static bool kvm_slots_grow(KVMMemoryListener *kml, unsigned int nr_slots_new)
{
    unsigned int i, cur = kml->nr_slots_allocated;
    KVMSlot *slots;

    if (nr_slots_new > kvm_state->nr_slots_max) {
        nr_slots_new = kvm_state->nr_slots_max;
    }

    if (cur >= nr_slots_new) {
        /* Big enough, no need to grow, or we reached max */
        return false;
    }

    if (cur == 0) {
        slots = g_new0(KVMSlot, nr_slots_new);
    } else {
        assert(kml->slots);
        slots = g_renew(KVMSlot, kml->slots, nr_slots_new);
        /*
         * g_renew() doesn't initialize extended buffers, however kvm
         * memslots require fields to be zero-initialized. E.g. pointers,
         * memory_size field, etc.
         */
        memset(&slots[cur], 0x0, sizeof(slots[0]) * (nr_slots_new - cur));
    }

    for (i = cur; i < nr_slots_new; i++) {
        slots[i].slot = i;
    }

    kml->slots = slots;
    kml->nr_slots_allocated = nr_slots_new;
    trace_kvm_slots_grow(cur, nr_slots_new);

    return true;
}

static bool kvm_slots_double(KVMMemoryListener *kml)
{
    return kvm_slots_grow(kml, kml->nr_slots_allocated * 2);
}
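
/*
 * Illustrative growth sequence (the concrete numbers are assumptions
 * for the example, not taken from this file): starting from
 * KVM_MEMSLOTS_NR_ALLOC_DEFAULT (16) slots, repeated
 * kvm_slots_double() calls yield 16 -> 32 -> 64 -> ... until
 * kvm_slots_grow() clamps the request at kvm_state->nr_slots_max,
 * after which it returns false and kvm_get_free_slot() below gives up.
 */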

unsigned int kvm_get_max_memslots(void)
{
    KVMState *s = KVM_STATE(current_accel());

    return s->nr_slots_max;
}

unsigned int kvm_get_free_memslots(void)
{
    unsigned int used_slots = 0;
    KVMState *s = kvm_state;
    int i;

    kvm_slots_lock();
    for (i = 0; i < s->nr_as; i++) {
        if (!s->as[i].ml) {
            continue;
        }
        used_slots = MAX(used_slots, s->as[i].ml->nr_slots_used);
    }
    kvm_slots_unlock();

    return s->nr_slots_max - used_slots;
}

/* Called with KVMMemoryListener.slots_lock held */
static KVMSlot *kvm_get_free_slot(KVMMemoryListener *kml)
{
    unsigned int n;
    int i;

    for (i = 0; i < kml->nr_slots_allocated; i++) {
        if (kml->slots[i].memory_size == 0) {
            return &kml->slots[i];
        }
    }

    /*
     * If no free slots, try to grow first by doubling.  Cache the old size
     * here to avoid another round of search: if the grow succeeded, it
     * means slots[] now must have the existing "n" slots occupied,
     * followed by one or more free slots starting from slots[n].
     */
    n = kml->nr_slots_allocated;
    if (kvm_slots_double(kml)) {
        return &kml->slots[n];
    }

    return NULL;
}

/* Called with KVMMemoryListener.slots_lock held */
static KVMSlot *kvm_alloc_slot(KVMMemoryListener *kml)
{
    KVMSlot *slot = kvm_get_free_slot(kml);

    if (slot) {
        return slot;
    }

    fprintf(stderr, "%s: no free slot available\n", __func__);
    abort();
}

static KVMSlot *kvm_lookup_matching_slot(KVMMemoryListener *kml,
                                         hwaddr start_addr,
                                         hwaddr size)
{
    int i;

    for (i = 0; i < kml->nr_slots_allocated; i++) {
        KVMSlot *mem = &kml->slots[i];

        if (start_addr == mem->start_addr && size == mem->memory_size) {
            return mem;
        }
    }

    return NULL;
}

/*
 * Calculate and align the start address and the size of the section.
 * Return the size. If the size is 0, the aligned section is empty.
 */
static hwaddr kvm_align_section(MemoryRegionSection *section,
                                hwaddr *start)
{
    hwaddr size = int128_get64(section->size);
    hwaddr delta, aligned;

    /* kvm works in page size chunks, but the function may be called
       with sub-page size and unaligned start address. Pad the start
       address to the next page boundary and truncate the size to the
       previous one. */
    aligned = ROUND_UP(section->offset_within_address_space,
                       qemu_real_host_page_size());
    delta = aligned - section->offset_within_address_space;
    *start = aligned;
    if (delta > size) {
        return 0;
    }

    return (size - delta) & qemu_real_host_page_mask();
}
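
/*
 * Worked example for kvm_align_section(), assuming a 4 KiB host page
 * size: a section at offset_within_address_space 0x1080 with size
 * 0x3000 is padded to an aligned start of 0x2000 (delta 0xf80), and
 * the remaining 0x2080 bytes are truncated down to 0x2000; the 0x80
 * byte tail beyond the last full page is dropped.
 */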

int kvm_physical_memory_addr_from_host(KVMState *s, void *ram,
                                       hwaddr *phys_addr)
{
    KVMMemoryListener *kml = &s->memory_listener;
    int i, ret = 0;

    kvm_slots_lock();
    for (i = 0; i < kml->nr_slots_allocated; i++) {
        KVMSlot *mem = &kml->slots[i];

        if (ram >= mem->ram && ram < mem->ram + mem->memory_size) {
            *phys_addr = mem->start_addr + (ram - mem->ram);
            ret = 1;
            break;
        }
    }
    kvm_slots_unlock();

    return ret;
}

static int kvm_set_user_memory_region(KVMMemoryListener *kml, KVMSlot *slot, bool new)
{
    KVMState *s = kvm_state;
    struct kvm_userspace_memory_region2 mem;
    int ret;

    mem.slot = slot->slot | (kml->as_id << 16);
    mem.guest_phys_addr = slot->start_addr;
    mem.userspace_addr = (unsigned long)slot->ram;
    mem.flags = slot->flags;
    mem.guest_memfd = slot->guest_memfd;
    mem.guest_memfd_offset = slot->guest_memfd_offset;

    if (slot->memory_size && !new && (mem.flags ^ slot->old_flags) & KVM_MEM_READONLY) {
        /* Set the slot size to 0 before setting the slot to the desired
         * value. This is needed based on KVM commit 75d61fbc. */
        mem.memory_size = 0;

        if (kvm_guest_memfd_supported) {
            ret = kvm_vm_ioctl(s, KVM_SET_USER_MEMORY_REGION2, &mem);
        } else {
            ret = kvm_vm_ioctl(s, KVM_SET_USER_MEMORY_REGION, &mem);
        }
        if (ret < 0) {
            goto err;
        }
    }
    mem.memory_size = slot->memory_size;
    if (kvm_guest_memfd_supported) {
        ret = kvm_vm_ioctl(s, KVM_SET_USER_MEMORY_REGION2, &mem);
    } else {
        ret = kvm_vm_ioctl(s, KVM_SET_USER_MEMORY_REGION, &mem);
    }
    slot->old_flags = mem.flags;
err:
    trace_kvm_set_user_memory(mem.slot >> 16, (uint16_t)mem.slot, mem.flags,
                              mem.guest_phys_addr, mem.memory_size,
                              mem.userspace_addr, mem.guest_memfd,
                              mem.guest_memfd_offset, ret);
    if (ret < 0) {
        if (kvm_guest_memfd_supported) {
                error_report("%s: KVM_SET_USER_MEMORY_REGION2 failed, slot=%d,"
                        " start=0x%" PRIx64 ", size=0x%" PRIx64 ","
                        " flags=0x%" PRIx32 ", guest_memfd=%" PRId32 ","
                        " guest_memfd_offset=0x%" PRIx64 ": %s",
                        __func__, mem.slot, slot->start_addr,
                        (uint64_t)mem.memory_size, mem.flags,
                        mem.guest_memfd, (uint64_t)mem.guest_memfd_offset,
                        strerror(errno));
        } else {
                error_report("%s: KVM_SET_USER_MEMORY_REGION failed, slot=%d,"
                            " start=0x%" PRIx64 ", size=0x%" PRIx64 ": %s",
                            __func__, mem.slot, slot->start_addr,
                            (uint64_t)mem.memory_size, strerror(errno));
        }
    }
    return ret;
}

void kvm_park_vcpu(CPUState *cpu)
{
    struct KVMParkedVcpu *vcpu;

    trace_kvm_park_vcpu(cpu->cpu_index, kvm_arch_vcpu_id(cpu));

    vcpu = g_malloc0(sizeof(*vcpu));
    vcpu->vcpu_id = kvm_arch_vcpu_id(cpu);
    vcpu->kvm_fd = cpu->kvm_fd;
    QLIST_INSERT_HEAD(&kvm_state->kvm_parked_vcpus, vcpu, node);
}

int kvm_unpark_vcpu(KVMState *s, unsigned long vcpu_id)
{
    struct KVMParkedVcpu *cpu;
    int kvm_fd = -ENOENT;

    QLIST_FOREACH(cpu, &s->kvm_parked_vcpus, node) {
        if (cpu->vcpu_id == vcpu_id) {
            QLIST_REMOVE(cpu, node);
            kvm_fd = cpu->kvm_fd;
            g_free(cpu);
            break;
        }
    }

    trace_kvm_unpark_vcpu(vcpu_id, kvm_fd > 0 ? "unparked" : "!found parked");

    return kvm_fd;
}

static void kvm_reset_parked_vcpus(void *param)
{
    KVMState *s = param;
    struct KVMParkedVcpu *cpu;

    QLIST_FOREACH(cpu, &s->kvm_parked_vcpus, node) {
        kvm_arch_reset_parked_vcpu(cpu->vcpu_id, cpu->kvm_fd);
    }
}

int kvm_create_vcpu(CPUState *cpu)
{
    unsigned long vcpu_id = kvm_arch_vcpu_id(cpu);
    KVMState *s = kvm_state;
    int kvm_fd;

    /* check if the KVM vCPU already exists but is parked */
    kvm_fd = kvm_unpark_vcpu(s, vcpu_id);
    if (kvm_fd < 0) {
        /* vCPU not parked: create a new KVM vCPU */
        kvm_fd = kvm_vm_ioctl(s, KVM_CREATE_VCPU, vcpu_id);
        if (kvm_fd < 0) {
            error_report("KVM_CREATE_VCPU IOCTL failed for vCPU %lu", vcpu_id);
            return kvm_fd;
        }
    }

    cpu->kvm_fd = kvm_fd;
    cpu->kvm_state = s;
    cpu->vcpu_dirty = true;
    cpu->dirty_pages = 0;
    cpu->throttle_us_per_full = 0;

    trace_kvm_create_vcpu(cpu->cpu_index, vcpu_id, kvm_fd);

    return 0;
}

int kvm_create_and_park_vcpu(CPUState *cpu)
{
    int ret = 0;

    ret = kvm_create_vcpu(cpu);
    if (!ret) {
        kvm_park_vcpu(cpu);
    }

    return ret;
}

static int do_kvm_destroy_vcpu(CPUState *cpu)
{
    KVMState *s = kvm_state;
    int mmap_size;
    int ret = 0;

    trace_kvm_destroy_vcpu(cpu->cpu_index, kvm_arch_vcpu_id(cpu));

    ret = kvm_arch_destroy_vcpu(cpu);
    if (ret < 0) {
        goto err;
    }

    mmap_size = kvm_ioctl(s, KVM_GET_VCPU_MMAP_SIZE, 0);
    if (mmap_size < 0) {
        ret = mmap_size;
        trace_kvm_failed_get_vcpu_mmap_size();
        goto err;
    }

    ret = munmap(cpu->kvm_run, mmap_size);
    if (ret < 0) {
        goto err;
    }

    if (cpu->kvm_dirty_gfns) {
        ret = munmap(cpu->kvm_dirty_gfns, s->kvm_dirty_ring_bytes);
        if (ret < 0) {
            goto err;
        }
    }

    kvm_park_vcpu(cpu);
err:
    return ret;
}

void kvm_destroy_vcpu(CPUState *cpu)
{
    if (do_kvm_destroy_vcpu(cpu) < 0) {
        error_report("kvm_destroy_vcpu failed");
        exit(EXIT_FAILURE);
    }
}

int kvm_init_vcpu(CPUState *cpu, Error **errp)
{
    KVMState *s = kvm_state;
    int mmap_size;
    int ret;

    trace_kvm_init_vcpu(cpu->cpu_index, kvm_arch_vcpu_id(cpu));

    ret = kvm_create_vcpu(cpu);
    if (ret < 0) {
        error_setg_errno(errp, -ret,
                         "kvm_init_vcpu: kvm_create_vcpu failed (%lu)",
                         kvm_arch_vcpu_id(cpu));
        goto err;
    }

    mmap_size = kvm_ioctl(s, KVM_GET_VCPU_MMAP_SIZE, 0);
    if (mmap_size < 0) {
        ret = mmap_size;
        error_setg_errno(errp, -mmap_size,
                         "kvm_init_vcpu: KVM_GET_VCPU_MMAP_SIZE failed");
        goto err;
    }

    cpu->kvm_run = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE, MAP_SHARED,
                        cpu->kvm_fd, 0);
    if (cpu->kvm_run == MAP_FAILED) {
        ret = -errno;
        error_setg_errno(errp, ret,
                         "kvm_init_vcpu: mmap'ing vcpu state failed (%lu)",
                         kvm_arch_vcpu_id(cpu));
        goto err;
    }

    if (s->coalesced_mmio && !s->coalesced_mmio_ring) {
        s->coalesced_mmio_ring =
            (void *)cpu->kvm_run + s->coalesced_mmio * PAGE_SIZE;
    }

    if (s->kvm_dirty_ring_size) {
        /* Use MAP_SHARED to share pages with the kernel */
        cpu->kvm_dirty_gfns = mmap(NULL, s->kvm_dirty_ring_bytes,
                                   PROT_READ | PROT_WRITE, MAP_SHARED,
                                   cpu->kvm_fd,
                                   PAGE_SIZE * KVM_DIRTY_LOG_PAGE_OFFSET);
        if (cpu->kvm_dirty_gfns == MAP_FAILED) {
            ret = -errno;
            goto err;
        }
    }

    ret = kvm_arch_init_vcpu(cpu);
    if (ret < 0) {
        error_setg_errno(errp, -ret,
                         "kvm_init_vcpu: kvm_arch_init_vcpu failed (%lu)",
                         kvm_arch_vcpu_id(cpu));
    }
    cpu->kvm_vcpu_stats_fd = kvm_vcpu_ioctl(cpu, KVM_GET_STATS_FD, NULL);

err:
    return ret;
}

/*
 * dirty pages logging control
 */

static int kvm_mem_flags(MemoryRegion *mr)
{
    bool readonly = mr->readonly || memory_region_is_romd(mr);
    int flags = 0;

    if (memory_region_get_dirty_log_mask(mr) != 0) {
        flags |= KVM_MEM_LOG_DIRTY_PAGES;
    }
    if (readonly && kvm_readonly_mem_allowed) {
        flags |= KVM_MEM_READONLY;
    }
    if (memory_region_has_guest_memfd(mr)) {
        assert(kvm_guest_memfd_supported);
        flags |= KVM_MEM_GUEST_MEMFD;
    }
    return flags;
}

/* Called with KVMMemoryListener.slots_lock held */
static int kvm_slot_update_flags(KVMMemoryListener *kml, KVMSlot *mem,
                                 MemoryRegion *mr)
{
    mem->flags = kvm_mem_flags(mr);

    /* If nothing changed effectively, no need to issue ioctl */
    if (mem->flags == mem->old_flags) {
        return 0;
    }

    kvm_slot_init_dirty_bitmap(mem);
    return kvm_set_user_memory_region(kml, mem, false);
}

static int kvm_section_update_flags(KVMMemoryListener *kml,
                                    MemoryRegionSection *section)
{
    hwaddr start_addr, size, slot_size;
    KVMSlot *mem;
    int ret = 0;

    size = kvm_align_section(section, &start_addr);
    if (!size) {
        return 0;
    }

    kvm_slots_lock();

    while (size && !ret) {
        slot_size = MIN(kvm_max_slot_size, size);
        mem = kvm_lookup_matching_slot(kml, start_addr, slot_size);
        if (!mem) {
            /* We don't have a slot if we want to trap every access. */
            goto out;
        }

        ret = kvm_slot_update_flags(kml, mem, section->mr);
        start_addr += slot_size;
        size -= slot_size;
    }

out:
    kvm_slots_unlock();
    return ret;
}

static void kvm_log_start(MemoryListener *listener,
                          MemoryRegionSection *section,
                          int old, int new)
{
    KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);
    int r;

    if (old != 0) {
        return;
    }

    r = kvm_section_update_flags(kml, section);
    if (r < 0) {
        abort();
    }
}

static void kvm_log_stop(MemoryListener *listener,
                          MemoryRegionSection *section,
                          int old, int new)
{
    KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);
    int r;

    if (new != 0) {
        return;
    }

    r = kvm_section_update_flags(kml, section);
    if (r < 0) {
        abort();
    }
}

/* get kvm's dirty pages bitmap and update qemu's */
static void kvm_slot_sync_dirty_pages(KVMSlot *slot)
{
    ram_addr_t start = slot->ram_start_offset;
    ram_addr_t pages = slot->memory_size / qemu_real_host_page_size();

    cpu_physical_memory_set_dirty_lebitmap(slot->dirty_bmap, start, pages);
}

static void kvm_slot_reset_dirty_pages(KVMSlot *slot)
{
    memset(slot->dirty_bmap, 0, slot->dirty_bmap_size);
}

#define ALIGN(x, y)  (((x)+(y)-1) & ~((y)-1))

/* Allocate the dirty bitmap for a slot  */
static void kvm_slot_init_dirty_bitmap(KVMSlot *mem)
{
    if (!(mem->flags & KVM_MEM_LOG_DIRTY_PAGES) || mem->dirty_bmap) {
        return;
    }

    /*
     * XXX bad kernel interface alert
     * For dirty bitmap, kernel allocates array of size aligned to
     * bits-per-long.  But for case when the kernel is 64bits and
     * the userspace is 32bits, userspace can't align to the same
     * bits-per-long, since sizeof(long) is different between kernel
     * and user space.  This way, userspace will provide a buffer which
     * may be 4 bytes less than what the kernel will use, resulting in
     * userspace memory corruption (which is not detectable by valgrind
     * either, in most cases).
     * So for now, let's align to 64 instead of HOST_LONG_BITS here, in
     * a hope that sizeof(long) won't become >8 any time soon.
     *
     * Note: the granule of kvm dirty log is qemu_real_host_page_size.
     * And mem->memory_size is aligned to it (otherwise this mem can't
     * be registered to KVM).
     */
    hwaddr bitmap_size = ALIGN(mem->memory_size / qemu_real_host_page_size(),
                                        /*HOST_LONG_BITS*/ 64) / 8;
    mem->dirty_bmap = g_malloc0(bitmap_size);
    mem->dirty_bmap_size = bitmap_size;
}
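
/*
 * Worked example for the bitmap sizing above, assuming a 4 KiB host
 * page size: a 1 GiB slot covers 262144 pages, so ALIGN(262144, 64) / 8
 * gives a 32768-byte bitmap.  A single-page slot still allocates
 * ALIGN(1, 64) / 8 = 8 bytes, i.e. one 64-bit word.
 */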

/*
 * Sync dirty bitmap from kernel to KVMSlot.dirty_bmap, return true if
 * succeeded, false otherwise
 */
static bool kvm_slot_get_dirty_log(KVMState *s, KVMSlot *slot)
{
    struct kvm_dirty_log d = {};
    int ret;

    d.dirty_bitmap = slot->dirty_bmap;
    d.slot = slot->slot | (slot->as_id << 16);
    ret = kvm_vm_ioctl(s, KVM_GET_DIRTY_LOG, &d);

    if (ret == -ENOENT) {
        /* kernel does not have dirty bitmap in this slot */
        ret = 0;
    }
    if (ret) {
        error_report_once("%s: KVM_GET_DIRTY_LOG failed with %d",
                          __func__, ret);
    }
    return ret == 0;
}

/* Should be with all slots_lock held for the address spaces. */
static void kvm_dirty_ring_mark_page(KVMState *s, uint32_t as_id,
                                     uint32_t slot_id, uint64_t offset)
{
    KVMMemoryListener *kml;
    KVMSlot *mem;

    if (as_id >= s->nr_as) {
        return;
    }

    kml = s->as[as_id].ml;
    mem = &kml->slots[slot_id];

    if (!mem->memory_size || offset >=
        (mem->memory_size / qemu_real_host_page_size())) {
        return;
    }

    set_bit(offset, mem->dirty_bmap);
}

static bool dirty_gfn_is_dirtied(struct kvm_dirty_gfn *gfn)
{
    /*
     * Read the flags before the value.  Pairs with barrier in
     * KVM's kvm_dirty_ring_push() function.
     */
    return qatomic_load_acquire(&gfn->flags) == KVM_DIRTY_GFN_F_DIRTY;
}

static void dirty_gfn_set_collected(struct kvm_dirty_gfn *gfn)
{
    /*
     * Use a store-release so that the CPU that executes KVM_RESET_DIRTY_RINGS
     * sees the full content of the ring:
     *
     * CPU0                     CPU1                         CPU2
     * ------------------------------------------------------------------------------
     *                                                       fill gfn0
     *                                                       store-rel flags for gfn0
     * load-acq flags for gfn0
     * store-rel RESET for gfn0
     *                          ioctl(RESET_RINGS)
     *                            load-acq flags for gfn0
     *                            check if flags have RESET
     *
     * The synchronization goes from CPU2 to CPU0 to CPU1.
     */
    qatomic_store_release(&gfn->flags, KVM_DIRTY_GFN_F_RESET);
}

/*
 * Should be with all slots_lock held for the address spaces.  It returns the
 * dirty page we've collected on this dirty ring.
 */
static uint32_t kvm_dirty_ring_reap_one(KVMState *s, CPUState *cpu)
{
    struct kvm_dirty_gfn *dirty_gfns = cpu->kvm_dirty_gfns, *cur;
    uint32_t ring_size = s->kvm_dirty_ring_size;
    uint32_t count = 0, fetch = cpu->kvm_fetch_index;

    /*
     * It's possible that we race with vcpu creation code where the vcpu is
     * put onto the vcpus list but has not yet initialized its dirty ring
     * structures.  If so, skip it.
     */
    if (!cpu->created) {
        return 0;
    }

    assert(dirty_gfns && ring_size);
    trace_kvm_dirty_ring_reap_vcpu(cpu->cpu_index);

    while (true) {
        cur = &dirty_gfns[fetch % ring_size];
        if (!dirty_gfn_is_dirtied(cur)) {
            break;
        }
        kvm_dirty_ring_mark_page(s, cur->slot >> 16, cur->slot & 0xffff,
                                 cur->offset);
        dirty_gfn_set_collected(cur);
        trace_kvm_dirty_ring_page(cpu->cpu_index, fetch, cur->offset);
        fetch++;
        count++;
    }
    cpu->kvm_fetch_index = fetch;
    cpu->dirty_pages += count;

    return count;
}

/* Must be with slots_lock held */
static uint64_t kvm_dirty_ring_reap_locked(KVMState *s, CPUState *cpu)
{
    int ret;
    uint64_t total = 0;
    int64_t stamp;

    stamp = get_clock();

    if (cpu) {
        total = kvm_dirty_ring_reap_one(s, cpu);
    } else {
        CPU_FOREACH(cpu) {
            total += kvm_dirty_ring_reap_one(s, cpu);
        }
    }

    if (total) {
        ret = kvm_vm_ioctl(s, KVM_RESET_DIRTY_RINGS);
        assert(ret == total);
    }

    stamp = get_clock() - stamp;

    if (total) {
        trace_kvm_dirty_ring_reap(total, stamp / 1000);
    }

    return total;
}

/*
 * Currently for simplicity, we must hold BQL before calling this.  We can
 * consider to drop the BQL if we're clear with all the race conditions.
 */
static uint64_t kvm_dirty_ring_reap(KVMState *s, CPUState *cpu)
{
    uint64_t total;

    /*
     * We need to lock all kvm slots for all address spaces here,
     * because:
     *
     * (1) We need to mark dirty for dirty bitmaps in multiple slots
     *     and for tons of pages, so it's better to take the lock here
     *     once rather than once per page.  And more importantly,
     *
     * (2) We must _NOT_ publish dirty bits to the other threads
     *     (e.g., the migration thread) via the kvm memory slot dirty
     *     bitmaps before correctly re-protect those dirtied pages.
     *     Otherwise we can have potential risk of data corruption if
     *     the page data is read in the other thread before we do
     *     reset below.
     */
    kvm_slots_lock();
    total = kvm_dirty_ring_reap_locked(s, cpu);
    kvm_slots_unlock();

    return total;
}

static void do_kvm_cpu_synchronize_kick(CPUState *cpu, run_on_cpu_data arg)
{
    /* No need to do anything */
}

/*
 * Kick all vcpus out in a synchronized way.  When returned, we
 * guarantee that every vcpu has been kicked and at least returned to
 * userspace once.
 */
static void kvm_cpu_synchronize_kick_all(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        run_on_cpu(cpu, do_kvm_cpu_synchronize_kick, RUN_ON_CPU_NULL);
    }
}

/*
 * Flush all the existing dirty pages to the KVM slot buffers.  When
 * this call returns, we guarantee that all the touched dirty pages
 * before calling this function have been put into the per-kvmslot
 * dirty bitmap.
 *
 * This function must be called with BQL held.
 */
static void kvm_dirty_ring_flush(void)
{
    trace_kvm_dirty_ring_flush(0);
    /*
     * The function needs to be serialized.  Since this function
     * should always be with BQL held, serialization is guaranteed.
     * However, let's be sure of it.
     */
    assert(bql_locked());
    /*
     * First make sure to flush the hardware buffers by kicking all
     * vcpus out in a synchronous way.
     */
    kvm_cpu_synchronize_kick_all();
    kvm_dirty_ring_reap(kvm_state, NULL);
    trace_kvm_dirty_ring_flush(1);
}

/**
 * kvm_physical_sync_dirty_bitmap - Sync dirty bitmap from kernel space
 *
 * This function will first try to fetch dirty bitmap from the kernel,
 * and then updates qemu's dirty bitmap.
 *
 * NOTE: caller must be with kml->slots_lock held.
 *
 * @kml: the KVM memory listener object
 * @section: the memory section to sync the dirty bitmap with
 */
static void kvm_physical_sync_dirty_bitmap(KVMMemoryListener *kml,
                                           MemoryRegionSection *section)
{
    KVMState *s = kvm_state;
    KVMSlot *mem;
    hwaddr start_addr, size;
    hwaddr slot_size;

    size = kvm_align_section(section, &start_addr);
    while (size) {
        slot_size = MIN(kvm_max_slot_size, size);
        mem = kvm_lookup_matching_slot(kml, start_addr, slot_size);
        if (!mem) {
            /* We don't have a slot if we want to trap every access. */
            return;
        }
        if (kvm_slot_get_dirty_log(s, mem)) {
            kvm_slot_sync_dirty_pages(mem);
        }
        start_addr += slot_size;
        size -= slot_size;
    }
}

/* Alignment requirement for KVM_CLEAR_DIRTY_LOG - 64 pages */
#define KVM_CLEAR_LOG_SHIFT  6
#define KVM_CLEAR_LOG_ALIGN  (qemu_real_host_page_size() << KVM_CLEAR_LOG_SHIFT)
#define KVM_CLEAR_LOG_MASK   (-KVM_CLEAR_LOG_ALIGN)

static int kvm_log_clear_one_slot(KVMSlot *mem, int as_id, uint64_t start,
                                  uint64_t size)
{
    KVMState *s = kvm_state;
    uint64_t end, bmap_start, start_delta, bmap_npages;
    struct kvm_clear_dirty_log d;
    unsigned long *bmap_clear = NULL, psize = qemu_real_host_page_size();
    int ret;

    /*
     * We need to extend either the start or the size or both to
     * satisfy the KVM interface requirement.  Firstly, do the start
     * page alignment on 64 host pages
     */
    bmap_start = start & KVM_CLEAR_LOG_MASK;
    start_delta = start - bmap_start;
    bmap_start /= psize;

    /*
     * The kernel interface has restriction on the size too, that either:
     *
     * (1) the size is 64 host pages aligned (just like the start), or
     * (2) the size fills up until the end of the KVM memslot.
     */
    bmap_npages = DIV_ROUND_UP(size + start_delta, KVM_CLEAR_LOG_ALIGN)
        << KVM_CLEAR_LOG_SHIFT;
    end = mem->memory_size / psize;
    if (bmap_npages > end - bmap_start) {
        bmap_npages = end - bmap_start;
    }
    start_delta /= psize;

    /*
     * Prepare the bitmap to clear dirty bits.  Here we must guarantee
     * that we won't clear any unknown dirty bits otherwise we might
     * accidentally clear some set bits which are not yet synced from
     * the kernel into QEMU's bitmap, then we'll lose track of the
     * guest modifications upon those pages (which can directly lead
     * to guest data loss or panic after migration).
     *
     * Layout of the KVMSlot.dirty_bmap:
     *
     *                   |<-------- bmap_npages -----------..>|
     *                                                     [1]
     *                     start_delta         size
     *  |----------------|-------------|------------------|------------|
     *  ^                ^             ^                               ^
     *  |                |             |                               |
     * start          bmap_start     (start)                         end
     * of memslot                                             of memslot
     *
     * [1] bmap_npages can be aligned to either 64 pages or the end of slot
     */

    assert(bmap_start % BITS_PER_LONG == 0);
    /* We should never do log_clear before log_sync */
    assert(mem->dirty_bmap);
    if (start_delta || bmap_npages - size / psize) {
        /* Slow path - we need to manipulate a temp bitmap */
        bmap_clear = bitmap_new(bmap_npages);
        bitmap_copy_with_src_offset(bmap_clear, mem->dirty_bmap,
                                    bmap_start, start_delta + size / psize);
        /*
         * We need to clear the bits for the holes at the start because
         * those pages were not part of the caller's request; the bitmap
         * was only extended for 64-page alignment.
         */
        bitmap_clear(bmap_clear, 0, start_delta);
        d.dirty_bitmap = bmap_clear;
    } else {
        /*
         * Fast path - both start and size align well with BITS_PER_LONG
         * (or the end of memory slot)
         */
        d.dirty_bitmap = mem->dirty_bmap + BIT_WORD(bmap_start);
    }

    d.first_page = bmap_start;
    /* It should never overflow.  If it happens, say something */
    assert(bmap_npages <= UINT32_MAX);
    d.num_pages = bmap_npages;
    d.slot = mem->slot | (as_id << 16);

    ret = kvm_vm_ioctl(s, KVM_CLEAR_DIRTY_LOG, &d);
    if (ret < 0 && ret != -ENOENT) {
        error_report("%s: KVM_CLEAR_DIRTY_LOG failed, slot=%d, "
                     "start=0x%"PRIx64", size=0x%"PRIx32", errno=%d",
                     __func__, d.slot, (uint64_t)d.first_page,
                     (uint32_t)d.num_pages, ret);
    } else {
        ret = 0;
        trace_kvm_clear_dirty_log(d.slot, d.first_page, d.num_pages);
    }

    /*
     * After we have updated the remote dirty bitmap, we update the
     * cached bitmap as well for the memslot, then if another user
     * clears the same region we know we shouldn't clear it again on
     * the remote otherwise it's data loss as well.
     */
    bitmap_clear(mem->dirty_bmap, bmap_start + start_delta,
                 size / psize);
    /* This handles the NULL case well */
    g_free(bmap_clear);
    return ret;
}
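
/*
 * Worked example for the alignment above, assuming a 4 KiB host page
 * size (so KVM_CLEAR_LOG_ALIGN is 64 pages = 0x40000 bytes): clearing
 * start=0x64000 (page 100), size=0x10000 (16 pages) rounds bmap_start
 * down to page 64 with start_delta = 36 pages, and rounds bmap_npages
 * up to 64 pages; the slow path then builds a temporary bitmap whose
 * first 36 bits are cleared so only the caller's 16 pages can be reset.
 */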


/**
 * kvm_physical_log_clear - Clear the kernel's dirty bitmap for range
 *
 * NOTE: this will be a no-op if we haven't enabled manual dirty log
 * protection in the host kernel because in that case this operation
 * will be done within log_sync().
 *
 * @kml:     the kvm memory listener
 * @section: the memory range to clear dirty bitmap
 */
static int kvm_physical_log_clear(KVMMemoryListener *kml,
                                  MemoryRegionSection *section)
{
    KVMState *s = kvm_state;
    uint64_t start, size, offset, count;
    KVMSlot *mem;
    int ret = 0, i;

    if (!s->manual_dirty_log_protect) {
        /* No need to do explicit clear */
        return ret;
    }

    start = section->offset_within_address_space;
    size = int128_get64(section->size);

    if (!size) {
        /* Nothing more we can do... */
        return ret;
    }

    kvm_slots_lock();

    for (i = 0; i < kml->nr_slots_allocated; i++) {
        mem = &kml->slots[i];
        /* Discard slots that are empty or do not overlap the section */
        if (!mem->memory_size ||
            mem->start_addr > start + size - 1 ||
            start > mem->start_addr + mem->memory_size - 1) {
            continue;
        }

        if (start >= mem->start_addr) {
            /* The slot starts before section or is aligned to it.  */
            offset = start - mem->start_addr;
            count = MIN(mem->memory_size - offset, size);
        } else {
            /* The slot starts after section.  */
            offset = 0;
            count = MIN(mem->memory_size, size - (mem->start_addr - start));
        }
        ret = kvm_log_clear_one_slot(mem, kml->as_id, offset, count);
        if (ret < 0) {
            break;
        }
    }

    kvm_slots_unlock();

    return ret;
}

static void kvm_coalesce_mmio_region(MemoryListener *listener,
                                     MemoryRegionSection *secion,
                                     hwaddr start, hwaddr size)
{
    KVMState *s = kvm_state;

    if (s->coalesced_mmio) {
        struct kvm_coalesced_mmio_zone zone;

        zone.addr = start;
        zone.size = size;
        zone.pad = 0;

        (void)kvm_vm_ioctl(s, KVM_REGISTER_COALESCED_MMIO, &zone);
    }
}

static void kvm_uncoalesce_mmio_region(MemoryListener *listener,
                                       MemoryRegionSection *secion,
                                       hwaddr start, hwaddr size)
{
    KVMState *s = kvm_state;

    if (s->coalesced_mmio) {
        struct kvm_coalesced_mmio_zone zone;

        zone.addr = start;
        zone.size = size;
        zone.pad = 0;

        (void)kvm_vm_ioctl(s, KVM_UNREGISTER_COALESCED_MMIO, &zone);
    }
}

static void kvm_coalesce_pio_add(MemoryListener *listener,
                                MemoryRegionSection *section,
                                hwaddr start, hwaddr size)
{
    KVMState *s = kvm_state;

    if (s->coalesced_pio) {
        struct kvm_coalesced_mmio_zone zone;

        zone.addr = start;
        zone.size = size;
        zone.pio = 1;

        (void)kvm_vm_ioctl(s, KVM_REGISTER_COALESCED_MMIO, &zone);
    }
}

static void kvm_coalesce_pio_del(MemoryListener *listener,
                                MemoryRegionSection *section,
                                hwaddr start, hwaddr size)
{
    KVMState *s = kvm_state;

    if (s->coalesced_pio) {
        struct kvm_coalesced_mmio_zone zone;

        zone.addr = start;
        zone.size = size;
        zone.pio = 1;

        (void)kvm_vm_ioctl(s, KVM_UNREGISTER_COALESCED_MMIO, &zone);
     }
}

int kvm_check_extension(KVMState *s, unsigned int extension)
{
    int ret;

    ret = kvm_ioctl(s, KVM_CHECK_EXTENSION, extension);
    if (ret < 0) {
        ret = 0;
    }

    return ret;
}

int kvm_vm_check_extension(KVMState *s, unsigned int extension)
{
    int ret;

    ret = kvm_vm_ioctl(s, KVM_CHECK_EXTENSION, extension);
    if (ret < 0) {
        /* VM wide version not implemented, use global one instead */
        ret = kvm_check_extension(s, extension);
    }

    return ret;
}

/*
 * We track the poisoned pages to be able to:
 * - replace them on VM reset
 * - block a migration for a VM with a poisoned page
 */
typedef struct HWPoisonPage {
    ram_addr_t ram_addr;
    QLIST_ENTRY(HWPoisonPage) list;
} HWPoisonPage;

static QLIST_HEAD(, HWPoisonPage) hwpoison_page_list =
    QLIST_HEAD_INITIALIZER(hwpoison_page_list);

static void kvm_unpoison_all(void *param)
{
    HWPoisonPage *page, *next_page;

    QLIST_FOREACH_SAFE(page, &hwpoison_page_list, list, next_page) {
        QLIST_REMOVE(page, list);
        qemu_ram_remap(page->ram_addr, TARGET_PAGE_SIZE);
        g_free(page);
    }
}

void kvm_hwpoison_page_add(ram_addr_t ram_addr)
{
    HWPoisonPage *page;

    QLIST_FOREACH(page, &hwpoison_page_list, list) {
        if (page->ram_addr == ram_addr) {
            return;
        }
    }
    page = g_new(HWPoisonPage, 1);
    page->ram_addr = ram_addr;
    QLIST_INSERT_HEAD(&hwpoison_page_list, page, list);
}

bool kvm_hwpoisoned_mem(void)
{
    return !QLIST_EMPTY(&hwpoison_page_list);
}

static uint32_t adjust_ioeventfd_endianness(uint32_t val, uint32_t size)
{
#if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
    /* The kernel expects ioeventfd values in HOST_BIG_ENDIAN
     * endianness, but the memory core hands them in target endianness.
     * For example, PPC is always treated as big-endian even if running
     * on KVM and on PPC64LE.  Correct here.
     */
    switch (size) {
    case 2:
        val = bswap16(val);
        break;
    case 4:
        val = bswap32(val);
        break;
    }
#endif
    return val;
}
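
/*
 * Example of the swap above (an illustration, not additional logic):
 * for a big-endian ppc64 guest on a little-endian host, a 4-byte
 * datamatch value of 0x12345678 arrives in target endianness;
 * bswap32() turns it into 0x78563412 so that the kernel, which
 * compares the raw bytes the guest stored (12 34 56 78) as a
 * host-endian integer, matches correctly.
 */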

static int kvm_set_ioeventfd_mmio(int fd, hwaddr addr, uint32_t val,
                                  bool assign, uint32_t size, bool datamatch)
{
    int ret;
    struct kvm_ioeventfd iofd = {
        .datamatch = datamatch ? adjust_ioeventfd_endianness(val, size) : 0,
        .addr = addr,
        .len = size,
        .flags = 0,
        .fd = fd,
    };

    trace_kvm_set_ioeventfd_mmio(fd, (uint64_t)addr, val, assign, size,
                                 datamatch);
    if (!kvm_enabled()) {
        return -ENOSYS;
    }

    if (datamatch) {
        iofd.flags |= KVM_IOEVENTFD_FLAG_DATAMATCH;
    }
    if (!assign) {
        iofd.flags |= KVM_IOEVENTFD_FLAG_DEASSIGN;
    }

    ret = kvm_vm_ioctl(kvm_state, KVM_IOEVENTFD, &iofd);

    if (ret < 0) {
        return -errno;
    }

    return 0;
}
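
/*
 * A minimal usage sketch (not compiled in; the doorbell address and
 * the eventfd wiring are assumptions for illustration).  Device models
 * normally reach this helper indirectly via memory_region_add_eventfd()
 * and the ioeventfd memory listener rather than calling it themselves.
 */
#if 0
static void example_wire_doorbell(void)
{
    int fd = eventfd(0, EFD_CLOEXEC);   /* host-side notification fd */
    hwaddr doorbell = 0xfe000000;       /* hypothetical MMIO address */

    /* Kick 'fd' whenever the guest writes the 4-byte value 1 here. */
    if (kvm_set_ioeventfd_mmio(fd, doorbell, 1, true, 4, true) < 0) {
        /* fall back to trapping the MMIO write in userspace */
    }
}
#endif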

static int kvm_set_ioeventfd_pio(int fd, uint16_t addr, uint16_t val,
                                 bool assign, uint32_t size, bool datamatch)
{
    struct kvm_ioeventfd kick = {
        .datamatch = datamatch ? adjust_ioeventfd_endianness(val, size) : 0,
        .addr = addr,
        .flags = KVM_IOEVENTFD_FLAG_PIO,
        .len = size,
        .fd = fd,
    };
    int r;
    trace_kvm_set_ioeventfd_pio(fd, addr, val, assign, size, datamatch);
    if (!kvm_enabled()) {
        return -ENOSYS;
    }
    if (datamatch) {
        kick.flags |= KVM_IOEVENTFD_FLAG_DATAMATCH;
    }
    if (!assign) {
        kick.flags |= KVM_IOEVENTFD_FLAG_DEASSIGN;
    }
    r = kvm_vm_ioctl(kvm_state, KVM_IOEVENTFD, &kick);
    if (r < 0) {
        return r;
    }
    return 0;
}


static const KVMCapabilityInfo *
kvm_check_extension_list(KVMState *s, const KVMCapabilityInfo *list)
{
    while (list->name) {
        if (!kvm_check_extension(s, list->value)) {
            return list;
        }
        list++;
    }
    return NULL;
}

void kvm_set_max_memslot_size(hwaddr max_slot_size)
{
    g_assert(
        ROUND_UP(max_slot_size, qemu_real_host_page_size()) == max_slot_size
    );
    kvm_max_slot_size = max_slot_size;
}

static int kvm_set_memory_attributes(hwaddr start, uint64_t size, uint64_t attr)
{
    struct kvm_memory_attributes attrs;
    int r;

    assert((attr & kvm_supported_memory_attributes) == attr);
    attrs.attributes = attr;
    attrs.address = start;
    attrs.size = size;
    attrs.flags = 0;

    r = kvm_vm_ioctl(kvm_state, KVM_SET_MEMORY_ATTRIBUTES, &attrs);
    if (r) {
        error_report("failed to set memory (0x%" HWADDR_PRIx "+0x%" PRIx64 ") "
                     "with attr 0x%" PRIx64 " error '%s'",
                     start, size, attr, strerror(errno));
    }
    return r;
}

int kvm_set_memory_attributes_private(hwaddr start, uint64_t size)
{
    return kvm_set_memory_attributes(start, size, KVM_MEMORY_ATTRIBUTE_PRIVATE);
}

int kvm_set_memory_attributes_shared(hwaddr start, uint64_t size)
{
    return kvm_set_memory_attributes(start, size, 0);
}

/* Called with KVMMemoryListener.slots_lock held */
static void kvm_set_phys_mem(KVMMemoryListener *kml,
                             MemoryRegionSection *section, bool add)
{
    KVMSlot *mem;
    int err;
    MemoryRegion *mr = section->mr;
    bool writable = !mr->readonly && !mr->rom_device;
    hwaddr start_addr, size, slot_size, mr_offset;
    ram_addr_t ram_start_offset;
    void *ram;

    if (!memory_region_is_ram(mr)) {
        if (writable || !kvm_readonly_mem_allowed) {
            return;
        } else if (!mr->romd_mode) {
            /* If the memory device is not in romd_mode, then we actually want
             * to remove the kvm memory slot so all accesses will trap. */
            add = false;
        }
    }

    size = kvm_align_section(section, &start_addr);
    if (!size) {
        return;
    }

    /* The offset of the kvmslot within the memory region */
    mr_offset = section->offset_within_region + start_addr -
        section->offset_within_address_space;

    /* use aligned delta to align the ram address and offset */
    ram = memory_region_get_ram_ptr(mr) + mr_offset;
    ram_start_offset = memory_region_get_ram_addr(mr) + mr_offset;

    if (!add) {
        do {
            slot_size = MIN(kvm_max_slot_size, size);
            mem = kvm_lookup_matching_slot(kml, start_addr, slot_size);
            if (!mem) {
                return;
            }
            if (mem->flags & KVM_MEM_LOG_DIRTY_PAGES) {
                /*
                 * NOTE: this is only a best-effort sync of the dirty
                 * bits.  Whether we use the dirty log or the dirty
                 * ring, we ignore two facts:
                 *
                 * (1) dirty bits can reside in hardware buffers (PML)
                 *
                 * (2) after we collected dirty bits here, pages can be dirtied
                 * again before we do the final KVM_SET_USER_MEMORY_REGION to
                 * remove the slot.
                 *
                 * Not easy.  Let's cross the fingers until it's fixed.
                 */
                if (kvm_state->kvm_dirty_ring_size) {
                    kvm_dirty_ring_reap_locked(kvm_state, NULL);
                    if (kvm_state->kvm_dirty_ring_with_bitmap) {
                        kvm_slot_sync_dirty_pages(mem);
                        kvm_slot_get_dirty_log(kvm_state, mem);
                    }
                } else {
                    kvm_slot_get_dirty_log(kvm_state, mem);
                }
                kvm_slot_sync_dirty_pages(mem);
            }

            /* unregister the slot */
            g_free(mem->dirty_bmap);
            mem->dirty_bmap = NULL;
            mem->memory_size = 0;
            mem->flags = 0;
            err = kvm_set_user_memory_region(kml, mem, false);
            if (err) {
                fprintf(stderr, "%s: error unregistering slot: %s\n",
                        __func__, strerror(-err));
                abort();
            }
            start_addr += slot_size;
            size -= slot_size;
            kml->nr_slots_used--;
        } while (size);
        return;
    }

    /* register the new slot */
    do {
        slot_size = MIN(kvm_max_slot_size, size);
        mem = kvm_alloc_slot(kml);
        mem->as_id = kml->as_id;
        mem->memory_size = slot_size;
        mem->start_addr = start_addr;
        mem->ram_start_offset = ram_start_offset;
        mem->ram = ram;
        mem->flags = kvm_mem_flags(mr);
        mem->guest_memfd = mr->ram_block->guest_memfd;
        mem->guest_memfd_offset = (uint8_t*)ram - mr->ram_block->host;

        kvm_slot_init_dirty_bitmap(mem);
        err = kvm_set_user_memory_region(kml, mem, true);
        if (err) {
            fprintf(stderr, "%s: error registering slot: %s\n", __func__,
                    strerror(-err));
            abort();
        }

        if (memory_region_has_guest_memfd(mr)) {
            err = kvm_set_memory_attributes_private(start_addr, slot_size);
            if (err) {
                error_report("%s: failed to set memory attribute private: %s",
                             __func__, strerror(-err));
                exit(1);
            }
        }

        start_addr += slot_size;
        ram_start_offset += slot_size;
        ram += slot_size;
        size -= slot_size;
        kml->nr_slots_used++;
    } while (size);
}

static void *kvm_dirty_ring_reaper_thread(void *data)
{
    KVMState *s = data;
    struct KVMDirtyRingReaper *r = &s->reaper;

    rcu_register_thread();

    trace_kvm_dirty_ring_reaper("init");

    while (true) {
        r->reaper_state = KVM_DIRTY_RING_REAPER_WAIT;
        trace_kvm_dirty_ring_reaper("wait");
        /*
         * TODO: provide a smarter timeout rather than a constant?
         */
        sleep(1);

        /* keep sleeping so that the reaper does not interfere with dirtylimit */
1590         if (dirtylimit_in_service()) {
1591             continue;
1592         }
1593 
1594         trace_kvm_dirty_ring_reaper("wakeup");
1595         r->reaper_state = KVM_DIRTY_RING_REAPER_REAPING;
1596 
1597         bql_lock();
1598         kvm_dirty_ring_reap(s, NULL);
1599         bql_unlock();
1600 
1601         r->reaper_iteration++;
1602     }
1603 
1604     g_assert_not_reached();
1605 }
1606 
kvm_dirty_ring_reaper_init(KVMState * s)1607 static void kvm_dirty_ring_reaper_init(KVMState *s)
1608 {
1609     struct KVMDirtyRingReaper *r = &s->reaper;
1610 
1611     qemu_thread_create(&r->reaper_thr, "kvm-reaper",
1612                        kvm_dirty_ring_reaper_thread,
1613                        s, QEMU_THREAD_JOINABLE);
1614 }
1615 
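/*
 * Probe and enable the KVM dirty ring.  KVM_CAP_DIRTY_LOG_RING is tried
 * first, then the acquire/release variant; if neither is available (or
 * no ring size was requested) we silently stay with the dirty logging
 * bitmap.  The optional backup bitmap is enabled on top when the kernel
 * supports it.
 */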
1616 static int kvm_dirty_ring_init(KVMState *s)
1617 {
1618     uint32_t ring_size = s->kvm_dirty_ring_size;
1619     uint64_t ring_bytes = ring_size * sizeof(struct kvm_dirty_gfn);
1620     unsigned int capability = KVM_CAP_DIRTY_LOG_RING;
1621     int ret;
1622 
1623     s->kvm_dirty_ring_size = 0;
1624     s->kvm_dirty_ring_bytes = 0;
1625 
1626     /* Bail if the dirty ring size isn't specified */
1627     if (!ring_size) {
1628         return 0;
1629     }
1630 
1631     /*
1632      * Read the max supported pages. Fall back to dirty logging mode
1633      * if the dirty ring isn't supported.
1634      */
1635     ret = kvm_vm_check_extension(s, capability);
1636     if (ret <= 0) {
1637         capability = KVM_CAP_DIRTY_LOG_RING_ACQ_REL;
1638         ret = kvm_vm_check_extension(s, capability);
1639     }
1640 
1641     if (ret <= 0) {
1642         warn_report("KVM dirty ring not available, using bitmap method");
1643         return 0;
1644     }
1645 
1646     if (ring_bytes > ret) {
1647         error_report("KVM dirty ring size %" PRIu32 " too big "
1648                      "(maximum is %ld).  Please use a smaller value.",
1649                      ring_size, (long)ret / sizeof(struct kvm_dirty_gfn));
1650         return -EINVAL;
1651     }
1652 
1653     ret = kvm_vm_enable_cap(s, capability, 0, ring_bytes);
1654     if (ret) {
1655         error_report("Enabling of KVM dirty ring failed: %s. "
1656                      "Suggested minimum value is 1024.", strerror(-ret));
1657         return -EIO;
1658     }
1659 
1660     /* Enable the backup bitmap if it is supported */
1661     ret = kvm_vm_check_extension(s, KVM_CAP_DIRTY_LOG_RING_WITH_BITMAP);
1662     if (ret > 0) {
1663         ret = kvm_vm_enable_cap(s, KVM_CAP_DIRTY_LOG_RING_WITH_BITMAP, 0);
1664         if (ret) {
1665             error_report("Enabling of KVM dirty ring's backup bitmap failed: "
1666                          "%s. ", strerror(-ret));
1667             return -EIO;
1668         }
1669 
1670         s->kvm_dirty_ring_with_bitmap = true;
1671     }
1672 
1673     s->kvm_dirty_ring_size = ring_size;
1674     s->kvm_dirty_ring_bytes = ring_bytes;
1675 
1676     return 0;
1677 }
1678 
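/*
 * region_add/region_del only queue the affected sections; the actual
 * memslot updates happen in kvm_region_commit(), so that overlapping
 * delete/add pairs can be applied as one seemingly-atomic transaction.
 */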
1679 static void kvm_region_add(MemoryListener *listener,
1680                            MemoryRegionSection *section)
1681 {
1682     KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);
1683     KVMMemoryUpdate *update;
1684 
1685     update = g_new0(KVMMemoryUpdate, 1);
1686     update->section = *section;
1687 
1688     QSIMPLEQ_INSERT_TAIL(&kml->transaction_add, update, next);
1689 }
1690 
1691 static void kvm_region_del(MemoryListener *listener,
1692                            MemoryRegionSection *section)
1693 {
1694     KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);
1695     KVMMemoryUpdate *update;
1696 
1697     update = g_new0(KVMMemoryUpdate, 1);
1698     update->section = *section;
1699 
1700     QSIMPLEQ_INSERT_TAIL(&kml->transaction_del, update, next);
1701 }
1702 
1703 static void kvm_region_commit(MemoryListener *listener)
1704 {
1705     KVMMemoryListener *kml = container_of(listener, KVMMemoryListener,
1706                                           listener);
1707     KVMMemoryUpdate *u1, *u2;
1708     bool need_inhibit = false;
1709 
1710     if (QSIMPLEQ_EMPTY(&kml->transaction_add) &&
1711         QSIMPLEQ_EMPTY(&kml->transaction_del)) {
1712         return;
1713     }
1714 
1715     /*
1716      * We have to be careful when regions to add overlap with ranges to remove.
1717      * We have to simulate atomic KVM memslot updates by making sure no ioctl()
1718      * is currently active.
1719      *
1720      * The lists are ordered by address, so it's easy to find overlaps.
1721      */
1722     u1 = QSIMPLEQ_FIRST(&kml->transaction_del);
1723     u2 = QSIMPLEQ_FIRST(&kml->transaction_add);
1724     while (u1 && u2) {
1725         Range r1, r2;
1726 
1727         range_init_nofail(&r1, u1->section.offset_within_address_space,
1728                           int128_get64(u1->section.size));
1729         range_init_nofail(&r2, u2->section.offset_within_address_space,
1730                           int128_get64(u2->section.size));
1731 
1732         if (range_overlaps_range(&r1, &r2)) {
1733             need_inhibit = true;
1734             break;
1735         }
1736         if (range_lob(&r1) < range_lob(&r2)) {
1737             u1 = QSIMPLEQ_NEXT(u1, next);
1738         } else {
1739             u2 = QSIMPLEQ_NEXT(u2, next);
1740         }
1741     }
1742 
1743     kvm_slots_lock();
1744     if (need_inhibit) {
1745         accel_ioctl_inhibit_begin();
1746     }
1747 
1748     /* Remove all memslots before adding the new ones. */
1749     while (!QSIMPLEQ_EMPTY(&kml->transaction_del)) {
1750         u1 = QSIMPLEQ_FIRST(&kml->transaction_del);
1751         QSIMPLEQ_REMOVE_HEAD(&kml->transaction_del, next);
1752 
1753         kvm_set_phys_mem(kml, &u1->section, false);
1754         memory_region_unref(u1->section.mr);
1755 
1756         g_free(u1);
1757     }
1758     while (!QSIMPLEQ_EMPTY(&kml->transaction_add)) {
1759         u1 = QSIMPLEQ_FIRST(&kml->transaction_add);
1760         QSIMPLEQ_REMOVE_HEAD(&kml->transaction_add, next);
1761 
1762         memory_region_ref(u1->section.mr);
1763         kvm_set_phys_mem(kml, &u1->section, true);
1764 
1765         g_free(u1);
1766     }
1767 
1768     if (need_inhibit) {
1769         accel_ioctl_inhibit_end();
1770     }
1771     kvm_slots_unlock();
1772 }
1773 
1774 static void kvm_log_sync(MemoryListener *listener,
1775                          MemoryRegionSection *section)
1776 {
1777     KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);
1778 
1779     kvm_slots_lock();
1780     kvm_physical_sync_dirty_bitmap(kml, section);
1781     kvm_slots_unlock();
1782 }
1783 
1784 static void kvm_log_sync_global(MemoryListener *l, bool last_stage)
1785 {
1786     KVMMemoryListener *kml = container_of(l, KVMMemoryListener, listener);
1787     KVMState *s = kvm_state;
1788     KVMSlot *mem;
1789     int i;
1790 
1791     /* Flush all kernel dirty addresses into KVMSlot dirty bitmap */
1792     kvm_dirty_ring_flush();
1793 
1794     kvm_slots_lock();
1795     for (i = 0; i < kml->nr_slots_allocated; i++) {
1796         mem = &kml->slots[i];
1797         if (mem->memory_size && mem->flags & KVM_MEM_LOG_DIRTY_PAGES) {
1798             kvm_slot_sync_dirty_pages(mem);
1799 
1800             if (s->kvm_dirty_ring_with_bitmap && last_stage &&
1801                 kvm_slot_get_dirty_log(s, mem)) {
1802                 kvm_slot_sync_dirty_pages(mem);
1803             }
1804 
1805             /*
1806              * This is not needed by KVM_GET_DIRTY_LOG because the
1807              * ioctl will unconditionally overwrite the whole region.
1808              * However, the kvm dirty ring has no such side effect.
1809              */
1810             kvm_slot_reset_dirty_pages(mem);
1811         }
1812     }
1813     kvm_slots_unlock();
1814 }
1815 
1816 static void kvm_log_clear(MemoryListener *listener,
1817                           MemoryRegionSection *section)
1818 {
1819     KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);
1820     int r;
1821 
1822     r = kvm_physical_log_clear(kml, section);
1823     if (r < 0) {
1824         error_report_once("%s: kvm log clear failed: mr=%s "
1825                           "offset=%"HWADDR_PRIx" size=%"PRIx64, __func__,
1826                           section->mr->name, section->offset_within_region,
1827                           int128_get64(section->size));
1828         abort();
1829     }
1830 }
1831 
1832 static void kvm_mem_ioeventfd_add(MemoryListener *listener,
1833                                   MemoryRegionSection *section,
1834                                   bool match_data, uint64_t data,
1835                                   EventNotifier *e)
1836 {
1837     int fd = event_notifier_get_fd(e);
1838     int r;
1839 
1840     r = kvm_set_ioeventfd_mmio(fd, section->offset_within_address_space,
1841                                data, true, int128_get64(section->size),
1842                                match_data);
1843     if (r < 0) {
1844         fprintf(stderr, "%s: error adding ioeventfd: %s (%d)\n",
1845                 __func__, strerror(-r), -r);
1846         abort();
1847     }
1848 }
1849 
1850 static void kvm_mem_ioeventfd_del(MemoryListener *listener,
1851                                   MemoryRegionSection *section,
1852                                   bool match_data, uint64_t data,
1853                                   EventNotifier *e)
1854 {
1855     int fd = event_notifier_get_fd(e);
1856     int r;
1857 
1858     r = kvm_set_ioeventfd_mmio(fd, section->offset_within_address_space,
1859                                data, false, int128_get64(section->size),
1860                                match_data);
1861     if (r < 0) {
1862         fprintf(stderr, "%s: error deleting ioeventfd: %s (%d)\n",
1863                 __func__, strerror(-r), -r);
1864         abort();
1865     }
1866 }
1867 
1868 static void kvm_io_ioeventfd_add(MemoryListener *listener,
1869                                  MemoryRegionSection *section,
1870                                  bool match_data, uint64_t data,
1871                                  EventNotifier *e)
1872 {
1873     int fd = event_notifier_get_fd(e);
1874     int r;
1875 
1876     r = kvm_set_ioeventfd_pio(fd, section->offset_within_address_space,
1877                               data, true, int128_get64(section->size),
1878                               match_data);
1879     if (r < 0) {
1880         fprintf(stderr, "%s: error adding ioeventfd: %s (%d)\n",
1881                 __func__, strerror(-r), -r);
1882         abort();
1883     }
1884 }
1885 
1886 static void kvm_io_ioeventfd_del(MemoryListener *listener,
1887                                  MemoryRegionSection *section,
1888                                  bool match_data, uint64_t data,
1889                                  EventNotifier *e)
1891 {
1892     int fd = event_notifier_get_fd(e);
1893     int r;
1894 
1895     r = kvm_set_ioeventfd_pio(fd, section->offset_within_address_space,
1896                               data, false, int128_get64(section->size),
1897                               match_data);
1898     if (r < 0) {
1899         fprintf(stderr, "%s: error deleting ioeventfd: %s (%d)\n",
1900                 __func__, strerror(-r), -r);
1901         abort();
1902     }
1903 }
1904 
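/*
 * Wire a KVMMemoryListener up to an address space: pre-allocate the
 * default number of slots, pick the dirty-tracking callbacks depending
 * on whether the dirty ring is enabled, and record the as/listener pair
 * in s->as[] for later lookup.
 */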
1905 void kvm_memory_listener_register(KVMState *s, KVMMemoryListener *kml,
1906                                   AddressSpace *as, int as_id, const char *name)
1907 {
1908     int i;
1909 
1910     kml->as_id = as_id;
1911 
1912     kvm_slots_grow(kml, KVM_MEMSLOTS_NR_ALLOC_DEFAULT);
1913 
1914     QSIMPLEQ_INIT(&kml->transaction_add);
1915     QSIMPLEQ_INIT(&kml->transaction_del);
1916 
1917     kml->listener.region_add = kvm_region_add;
1918     kml->listener.region_del = kvm_region_del;
1919     kml->listener.commit = kvm_region_commit;
1920     kml->listener.log_start = kvm_log_start;
1921     kml->listener.log_stop = kvm_log_stop;
1922     kml->listener.priority = MEMORY_LISTENER_PRIORITY_ACCEL;
1923     kml->listener.name = name;
1924 
1925     if (s->kvm_dirty_ring_size) {
1926         kml->listener.log_sync_global = kvm_log_sync_global;
1927     } else {
1928         kml->listener.log_sync = kvm_log_sync;
1929         kml->listener.log_clear = kvm_log_clear;
1930     }
1931 
1932     memory_listener_register(&kml->listener, as);
1933 
1934     for (i = 0; i < s->nr_as; ++i) {
1935         if (!s->as[i].as) {
1936             s->as[i].as = as;
1937             s->as[i].ml = kml;
1938             break;
1939         }
1940     }
1941 }
1942 
1943 static MemoryListener kvm_io_listener = {
1944     .name = "kvm-io",
1945     .coalesced_io_add = kvm_coalesce_pio_add,
1946     .coalesced_io_del = kvm_coalesce_pio_del,
1947     .eventfd_add = kvm_io_ioeventfd_add,
1948     .eventfd_del = kvm_io_ioeventfd_del,
1949     .priority = MEMORY_LISTENER_PRIORITY_DEV_BACKEND,
1950 };
1951 
1952 int kvm_set_irq(KVMState *s, int irq, int level)
1953 {
1954     struct kvm_irq_level event;
1955     int ret;
1956 
1957     assert(kvm_async_interrupts_enabled());
1958 
1959     event.level = level;
1960     event.irq = irq;
1961     ret = kvm_vm_ioctl(s, s->irq_set_ioctl, &event);
1962     if (ret < 0) {
1963         perror("kvm_set_irq");
1964         abort();
1965     }
1966 
1967     return (s->irq_set_ioctl == KVM_IRQ_LINE) ? 1 : event.status;
1968 }
1969 
1970 #ifdef KVM_CAP_IRQ_ROUTING
1971 typedef struct KVMMSIRoute {
1972     struct kvm_irq_routing_entry kroute;
1973     QTAILQ_ENTRY(KVMMSIRoute) entry;
1974 } KVMMSIRoute;
1975 
1976 static void set_gsi(KVMState *s, unsigned int gsi)
1977 {
1978     set_bit(gsi, s->used_gsi_bitmap);
1979 }
1980 
1981 static void clear_gsi(KVMState *s, unsigned int gsi)
1982 {
1983     clear_bit(gsi, s->used_gsi_bitmap);
1984 }
1985 
1986 void kvm_init_irq_routing(KVMState *s)
1987 {
1988     int gsi_count;
1989 
1990     gsi_count = kvm_check_extension(s, KVM_CAP_IRQ_ROUTING) - 1;
1991     if (gsi_count > 0) {
1992         /* Round up so we can search ints using ffs */
1993         s->used_gsi_bitmap = bitmap_new(gsi_count);
1994         s->gsi_count = gsi_count;
1995     }
1996 
1997     s->irq_routes = g_malloc0(sizeof(*s->irq_routes));
1998     s->nr_allocated_irq_routes = 0;
1999 
2000     kvm_arch_init_irq_routing(s);
2001 }
2002 
2003 void kvm_irqchip_commit_routes(KVMState *s)
2004 {
2005     int ret;
2006 
2007     if (kvm_gsi_direct_mapping()) {
2008         return;
2009     }
2010 
2011     if (!kvm_gsi_routing_enabled()) {
2012         return;
2013     }
2014 
2015     s->irq_routes->flags = 0;
2016     trace_kvm_irqchip_commit_routes();
2017     ret = kvm_vm_ioctl(s, KVM_SET_GSI_ROUTING, s->irq_routes);
2018     assert(ret == 0);
2019 }
2020 
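/*
 * Append a routing entry, growing the irq_routes array geometrically
 * (doubling, with a floor of 64 entries) when it is full, and mark the
 * entry's GSI as used.
 */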
2021 void kvm_add_routing_entry(KVMState *s,
2022                            struct kvm_irq_routing_entry *entry)
2023 {
2024     struct kvm_irq_routing_entry *new;
2025     int n, size;
2026 
2027     if (s->irq_routes->nr == s->nr_allocated_irq_routes) {
2028         n = s->nr_allocated_irq_routes * 2;
2029         if (n < 64) {
2030             n = 64;
2031         }
2032         size = sizeof(struct kvm_irq_routing);
2033         size += n * sizeof(*new);
2034         s->irq_routes = g_realloc(s->irq_routes, size);
2035         s->nr_allocated_irq_routes = n;
2036     }
2037     n = s->irq_routes->nr++;
2038     new = &s->irq_routes->entries[n];
2039 
2040     *new = *entry;
2041 
2042     set_gsi(s, entry->gsi);
2043 }
2044 
2045 static int kvm_update_routing_entry(KVMState *s,
2046                                     struct kvm_irq_routing_entry *new_entry)
2047 {
2048     struct kvm_irq_routing_entry *entry;
2049     int n;
2050 
2051     for (n = 0; n < s->irq_routes->nr; n++) {
2052         entry = &s->irq_routes->entries[n];
2053         if (entry->gsi != new_entry->gsi) {
2054             continue;
2055         }
2056 
2057         if (!memcmp(entry, new_entry, sizeof(*entry))) {
2058             return 0;
2059         }
2060 
2061         *entry = *new_entry;
2062 
2063         return 0;
2064     }
2065 
2066     return -ESRCH;
2067 }
2068 
2069 void kvm_irqchip_add_irq_route(KVMState *s, int irq, int irqchip, int pin)
2070 {
2071     struct kvm_irq_routing_entry e = {};
2072 
2073     assert(pin < s->gsi_count);
2074 
2075     e.gsi = irq;
2076     e.type = KVM_IRQ_ROUTING_IRQCHIP;
2077     e.flags = 0;
2078     e.u.irqchip.irqchip = irqchip;
2079     e.u.irqchip.pin = pin;
2080     kvm_add_routing_entry(s, &e);
2081 }
2082 
2083 void kvm_irqchip_release_virq(KVMState *s, int virq)
2084 {
2085     struct kvm_irq_routing_entry *e;
2086     int i;
2087 
2088     if (kvm_gsi_direct_mapping()) {
2089         return;
2090     }
2091 
2092     for (i = 0; i < s->irq_routes->nr; i++) {
2093         e = &s->irq_routes->entries[i];
2094         if (e->gsi == virq) {
2095             s->irq_routes->nr--;
2096             *e = s->irq_routes->entries[s->irq_routes->nr];
2097         }
2098     }
2099     clear_gsi(s, virq);
2100     kvm_arch_release_virq_post(virq);
2101     trace_kvm_irqchip_release_virq(virq);
2102 }
2103 
2104 void kvm_irqchip_add_change_notifier(Notifier *n)
2105 {
2106     notifier_list_add(&kvm_irqchip_change_notifiers, n);
2107 }
2108 
2109 void kvm_irqchip_remove_change_notifier(Notifier *n)
2110 {
2111     notifier_remove(n);
2112 }
2113 
2114 void kvm_irqchip_change_notify(void)
2115 {
2116     notifier_list_notify(&kvm_irqchip_change_notifiers, NULL);
2117 }
2118 
2119 int kvm_irqchip_get_virq(KVMState *s)
2120 {
2121     int next_virq;
2122 
2123     /* Return the lowest unused GSI in the bitmap */
2124     next_virq = find_first_zero_bit(s->used_gsi_bitmap, s->gsi_count);
2125     if (next_virq >= s->gsi_count) {
2126         return -ENOSPC;
2127     } else {
2128         return next_virq;
2129     }
2130 }
2131 
2132 int kvm_irqchip_send_msi(KVMState *s, MSIMessage msg)
2133 {
2134     struct kvm_msi msi;
2135 
2136     msi.address_lo = (uint32_t)msg.address;
2137     msi.address_hi = msg.address >> 32;
2138     msi.data = le32_to_cpu(msg.data);
2139     msi.flags = 0;
2140     memset(msi.pad, 0, sizeof(msi.pad));
2141 
2142     return kvm_vm_ioctl(s, KVM_SIGNAL_MSI, &msi);
2143 }
2144 
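/*
 * Allocate a virq and install an MSI routing entry for the given
 * device/vector.  With direct GSI mapping no routing entry is needed
 * and the MSI data is translated straight to a GSI.  The new route only
 * reaches the kernel once the accumulated changes are pushed by
 * kvm_irqchip_commit_routes().  A sketched usage, assuming the usual
 * begin/commit helpers around a KVMRouteChange:
 *
 *     KVMRouteChange c = kvm_irqchip_begin_route_changes(s);
 *     int virq = kvm_irqchip_add_msi_route(&c, vector, dev);
 *     kvm_irqchip_commit_route_changes(&c);
 */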
2145 int kvm_irqchip_add_msi_route(KVMRouteChange *c, int vector, PCIDevice *dev)
2146 {
2147     struct kvm_irq_routing_entry kroute = {};
2148     int virq;
2149     KVMState *s = c->s;
2150     MSIMessage msg = {0, 0};
2151 
2152     if (pci_available && dev) {
2153         msg = pci_get_msi_message(dev, vector);
2154     }
2155 
2156     if (kvm_gsi_direct_mapping()) {
2157         return kvm_arch_msi_data_to_gsi(msg.data);
2158     }
2159 
2160     if (!kvm_gsi_routing_enabled()) {
2161         return -ENOSYS;
2162     }
2163 
2164     virq = kvm_irqchip_get_virq(s);
2165     if (virq < 0) {
2166         return virq;
2167     }
2168 
2169     kroute.gsi = virq;
2170     kroute.type = KVM_IRQ_ROUTING_MSI;
2171     kroute.flags = 0;
2172     kroute.u.msi.address_lo = (uint32_t)msg.address;
2173     kroute.u.msi.address_hi = msg.address >> 32;
2174     kroute.u.msi.data = le32_to_cpu(msg.data);
2175     if (pci_available && kvm_msi_devid_required()) {
2176         kroute.flags = KVM_MSI_VALID_DEVID;
2177         kroute.u.msi.devid = pci_requester_id(dev);
2178     }
2179     if (kvm_arch_fixup_msi_route(&kroute, msg.address, msg.data, dev)) {
2180         kvm_irqchip_release_virq(s, virq);
2181         return -EINVAL;
2182     }
2183 
2184     if (s->irq_routes->nr < s->gsi_count) {
2185         trace_kvm_irqchip_add_msi_route(dev ? dev->name : (char *)"N/A",
2186                                         vector, virq);
2187 
2188         kvm_add_routing_entry(s, &kroute);
2189         kvm_arch_add_msi_route_post(&kroute, vector, dev);
2190         c->changes++;
2191     } else {
2192         kvm_irqchip_release_virq(s, virq);
2193         return -ENOSPC;
2194     }
2195 
2196     return virq;
2197 }
2198 
2199 int kvm_irqchip_update_msi_route(KVMState *s, int virq, MSIMessage msg,
2200                                  PCIDevice *dev)
2201 {
2202     struct kvm_irq_routing_entry kroute = {};
2203 
2204     if (kvm_gsi_direct_mapping()) {
2205         return 0;
2206     }
2207 
2208     if (!kvm_irqchip_in_kernel()) {
2209         return -ENOSYS;
2210     }
2211 
2212     kroute.gsi = virq;
2213     kroute.type = KVM_IRQ_ROUTING_MSI;
2214     kroute.flags = 0;
2215     kroute.u.msi.address_lo = (uint32_t)msg.address;
2216     kroute.u.msi.address_hi = msg.address >> 32;
2217     kroute.u.msi.data = le32_to_cpu(msg.data);
2218     if (pci_available && kvm_msi_devid_required()) {
2219         kroute.flags = KVM_MSI_VALID_DEVID;
2220         kroute.u.msi.devid = pci_requester_id(dev);
2221     }
2222     if (kvm_arch_fixup_msi_route(&kroute, msg.address, msg.data, dev)) {
2223         return -EINVAL;
2224     }
2225 
2226     trace_kvm_irqchip_update_msi_route(virq);
2227 
2228     return kvm_update_routing_entry(s, &kroute);
2229 }
2230 
2231 static int kvm_irqchip_assign_irqfd(KVMState *s, EventNotifier *event,
2232                                     EventNotifier *resample, int virq,
2233                                     bool assign)
2234 {
2235     int fd = event_notifier_get_fd(event);
2236     int rfd = resample ? event_notifier_get_fd(resample) : -1;
2237 
2238     struct kvm_irqfd irqfd = {
2239         .fd = fd,
2240         .gsi = virq,
2241         .flags = assign ? 0 : KVM_IRQFD_FLAG_DEASSIGN,
2242     };
2243 
2244     if (rfd != -1) {
2245         assert(assign);
2246         if (kvm_irqchip_is_split()) {
2247             /*
2248              * When the slow irqchip (e.g. IOAPIC) lives in
2249              * userspace, the in-kernel resamplefd will not work because
2250              * the EOI of the interrupt will be delivered to userspace
2251              * instead, so the kernel's resamplefd kick will be
2252              * skipped.  Userspace here mimics what the kernel
2253              * provides with resamplefd: remember the resamplefd and
2254              * kick it when we receive the EOI of this IRQ.
2255              *
2256              * This is hackery because the IOAPIC is mostly bypassed
2257              * (except for EOI broadcasts) when irqfd is used.  However,
2258              * this brings much of the performance back for split irqchip
2259              * with INTx IRQs (for VFIO, this gives 93% of the perf of the
2260              * full fast path, a 46% perf boost compared to
2261              * the INTx slow path).
2262              */
2263             kvm_resample_fd_insert(virq, resample);
2264         } else {
2265             irqfd.flags |= KVM_IRQFD_FLAG_RESAMPLE;
2266             irqfd.resamplefd = rfd;
2267         }
2268     } else if (!assign) {
2269         if (kvm_irqchip_is_split()) {
2270             kvm_resample_fd_remove(virq);
2271         }
2272     }
2273 
2274     return kvm_vm_ioctl(s, KVM_IRQFD, &irqfd);
2275 }
2276 
2277 #else /* !KVM_CAP_IRQ_ROUTING */
2278 
2279 void kvm_init_irq_routing(KVMState *s)
2280 {
2281 }
2282 
2283 void kvm_irqchip_release_virq(KVMState *s, int virq)
2284 {
2285 }
2286 
2287 int kvm_irqchip_send_msi(KVMState *s, MSIMessage msg)
2288 {
2289     abort();
2290 }
2291 
2292 int kvm_irqchip_add_msi_route(KVMRouteChange *c, int vector, PCIDevice *dev)
2293 {
2294     return -ENOSYS;
2295 }
2296 
2297 int kvm_irqchip_add_adapter_route(KVMState *s, AdapterInfo *adapter)
2298 {
2299     return -ENOSYS;
2300 }
2301 
2302 int kvm_irqchip_add_hv_sint_route(KVMState *s, uint32_t vcpu, uint32_t sint)
2303 {
2304     return -ENOSYS;
2305 }
2306 
2307 static int kvm_irqchip_assign_irqfd(KVMState *s, EventNotifier *event,
2308                                     EventNotifier *resample, int virq,
2309                                     bool assign)
2310 {
2311     abort();
2312 }
2313 
2314 int kvm_irqchip_update_msi_route(KVMState *s, int virq, MSIMessage msg)
2315 {
2316     return -ENOSYS;
2317 }
2318 #endif /* !KVM_CAP_IRQ_ROUTING */
2319 
2320 int kvm_irqchip_add_irqfd_notifier_gsi(KVMState *s, EventNotifier *n,
2321                                        EventNotifier *rn, int virq)
2322 {
2323     return kvm_irqchip_assign_irqfd(s, n, rn, virq, true);
2324 }
2325 
2326 int kvm_irqchip_remove_irqfd_notifier_gsi(KVMState *s, EventNotifier *n,
2327                                           int virq)
2328 {
2329     return kvm_irqchip_assign_irqfd(s, n, NULL, virq, false);
2330 }
2331 
2332 int kvm_irqchip_add_irqfd_notifier(KVMState *s, EventNotifier *n,
2333                                    EventNotifier *rn, qemu_irq irq)
2334 {
2335     gpointer key, gsi;
2336     gboolean found = g_hash_table_lookup_extended(s->gsimap, irq, &key, &gsi);
2337 
2338     if (!found) {
2339         return -ENXIO;
2340     }
2341     return kvm_irqchip_add_irqfd_notifier_gsi(s, n, rn, GPOINTER_TO_INT(gsi));
2342 }
2343 
2344 int kvm_irqchip_remove_irqfd_notifier(KVMState *s, EventNotifier *n,
2345                                       qemu_irq irq)
2346 {
2347     gpointer key, gsi;
2348     gboolean found = g_hash_table_lookup_extended(s->gsimap, irq, &key, &gsi);
2349 
2350     if (!found) {
2351         return -ENXIO;
2352     }
2353     return kvm_irqchip_remove_irqfd_notifier_gsi(s, n, GPOINTER_TO_INT(gsi));
2354 }
2355 
2356 void kvm_irqchip_set_qemuirq_gsi(KVMState *s, qemu_irq irq, int gsi)
2357 {
2358     g_hash_table_insert(s->gsimap, irq, GINT_TO_POINTER(gsi));
2359 }
2360 
2361 static void kvm_irqchip_create(KVMState *s)
2362 {
2363     int ret;
2364 
2365     assert(s->kernel_irqchip_split != ON_OFF_AUTO_AUTO);
2366     if (kvm_check_extension(s, KVM_CAP_IRQCHIP)) {
2367         ;
2368     } else if (kvm_check_extension(s, KVM_CAP_S390_IRQCHIP)) {
2369         ret = kvm_vm_enable_cap(s, KVM_CAP_S390_IRQCHIP, 0);
2370         if (ret < 0) {
2371             fprintf(stderr, "Enable kernel irqchip failed: %s\n", strerror(-ret));
2372             exit(1);
2373         }
2374     } else {
2375         return;
2376     }
2377 
2378     if (kvm_check_extension(s, KVM_CAP_IRQFD) <= 0) {
2379         fprintf(stderr, "kvm: irqfd not implemented\n");
2380         exit(1);
2381     }
2382 
2383     /* First probe and see if there's an arch-specific hook to create the
2384      * in-kernel irqchip for us */
2385     ret = kvm_arch_irqchip_create(s);
2386     if (ret == 0) {
2387         if (s->kernel_irqchip_split == ON_OFF_AUTO_ON) {
2388             error_report("Split IRQ chip mode not supported.");
2389             exit(1);
2390         } else {
2391             ret = kvm_vm_ioctl(s, KVM_CREATE_IRQCHIP);
2392         }
2393     }
2394     if (ret < 0) {
2395         fprintf(stderr, "Create kernel irqchip failed: %s\n", strerror(-ret));
2396         exit(1);
2397     }
2398 
2399     kvm_kernel_irqchip = true;
2400     /* If we have an in-kernel IRQ chip then we must have asynchronous
2401      * interrupt delivery (though the reverse is not necessarily true)
2402      */
2403     kvm_async_interrupts_allowed = true;
2404     kvm_halt_in_kernel_allowed = true;
2405 
2406     kvm_init_irq_routing(s);
2407 
2408     s->gsimap = g_hash_table_new(g_direct_hash, g_direct_equal);
2409 }
2410 
2411 /* Find the number of supported CPUs using the recommended
2412  * procedure from the kernel API documentation to cope with
2413  * older kernels that may be missing capabilities.
2414  */
2415 static int kvm_recommended_vcpus(KVMState *s)
2416 {
2417     int ret = kvm_vm_check_extension(s, KVM_CAP_NR_VCPUS);
2418     return (ret) ? ret : 4;
2419 }
2420 
2421 static int kvm_max_vcpus(KVMState *s)
2422 {
2423     int ret = kvm_check_extension(s, KVM_CAP_MAX_VCPUS);
2424     return (ret) ? ret : kvm_recommended_vcpus(s);
2425 }
2426 
2427 static int kvm_max_vcpu_id(KVMState *s)
2428 {
2429     int ret = kvm_check_extension(s, KVM_CAP_MAX_VCPU_ID);
2430     return (ret) ? ret : kvm_max_vcpus(s);
2431 }
2432 
2433 bool kvm_vcpu_id_is_valid(int vcpu_id)
2434 {
2435     KVMState *s = KVM_STATE(current_accel());
2436     return vcpu_id >= 0 && vcpu_id < kvm_max_vcpu_id(s);
2437 }
2438 
2439 bool kvm_dirty_ring_enabled(void)
2440 {
2441     return kvm_state && kvm_state->kvm_dirty_ring_size;
2442 }
2443 
2444 static void query_stats_cb(StatsResultList **result, StatsTarget target,
2445                            strList *names, strList *targets, Error **errp);
2446 static void query_stats_schemas_cb(StatsSchemaList **result, Error **errp);
2447 
2448 uint32_t kvm_dirty_ring_size(void)
2449 {
2450     return kvm_state->kvm_dirty_ring_size;
2451 }
2452 
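/*
 * Create the VM file descriptor.  KVM_CREATE_VM is retried on EINTR,
 * and the most common arch-specific failure causes get an explanatory
 * hint on top of the generic error message.
 */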
2453 static int do_kvm_create_vm(MachineState *ms, int type)
2454 {
2455     KVMState *s;
2456     int ret;
2457 
2458     s = KVM_STATE(ms->accelerator);
2459 
2460     do {
2461         ret = kvm_ioctl(s, KVM_CREATE_VM, type);
2462     } while (ret == -EINTR);
2463 
2464     if (ret < 0) {
2465         error_report("ioctl(KVM_CREATE_VM) failed: %s", strerror(-ret));
2466 
2467 #ifdef TARGET_S390X
2468         if (ret == -EINVAL) {
2469             error_printf("Host kernel setup problem detected."
2470                          " Please verify:\n");
2471             error_printf("- for kernels supporting the"
2472                         " switch_amode or user_mode parameters, whether");
2473             error_printf(" user space is running in primary address space\n");
2474             error_printf("- for kernels supporting the vm.allocate_pgste"
2475                          " sysctl, whether it is enabled\n");
2476         }
2477 #elif defined(TARGET_PPC)
2478         if (ret == -EINVAL) {
2479             error_printf("PPC KVM module is not loaded. Try modprobe kvm_%s.\n",
2480                          (type == 2) ? "pr" : "hv");
2481         }
2482 #endif
2483     }
2484 
2485     return ret;
2486 }
2487 
2488 static int find_kvm_machine_type(MachineState *ms)
2489 {
2490     MachineClass *mc = MACHINE_GET_CLASS(ms);
2491     int type;
2492 
2493     if (object_property_find(OBJECT(current_machine), "kvm-type")) {
2494         g_autofree char *kvm_type;
2495         kvm_type = object_property_get_str(OBJECT(current_machine),
2496                                            "kvm-type",
2497                                            &error_abort);
2498         type = mc->kvm_type(ms, kvm_type);
2499     } else if (mc->kvm_type) {
2500         type = mc->kvm_type(ms, NULL);
2501     } else {
2502         type = kvm_arch_get_default_type(ms);
2503     }
2504     return type;
2505 }
2506 
2507 static int kvm_setup_dirty_ring(KVMState *s)
2508 {
2509     uint64_t dirty_log_manual_caps;
2510     int ret;
2511 
2512     /*
2513      * Enable KVM dirty ring if supported, otherwise fall back to
2514      * dirty logging mode
2515      */
2516     ret = kvm_dirty_ring_init(s);
2517     if (ret < 0) {
2518         return ret;
2519     }
2520 
2521     /*
2522      * KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 is not needed when dirty ring is
2523      * enabled.  More importantly, KVM_DIRTY_LOG_INITIALLY_SET will assume no
2524      * page is wr-protected initially, which is against how the kvm dirty ring
2525      * is used - the dirty ring requires all pages to be wr-protected at the very
2526      * beginning.  Enabling this feature for the dirty ring causes data corruption.
2527      *
2528      * TODO: Without KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 and kvm clear dirty log,
2529      * we may expect a higher stall time when starting the migration.  In the
2530      * future we can enable KVM_CLEAR_DIRTY_LOG to work with dirty ring too:
2531      * instead of clearing dirty bit, it can be a way to explicitly wr-protect
2532      * guest pages.
2533      */
2534     if (!s->kvm_dirty_ring_size) {
2535         dirty_log_manual_caps =
2536             kvm_check_extension(s, KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2);
2537         dirty_log_manual_caps &= (KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE |
2538                                   KVM_DIRTY_LOG_INITIALLY_SET);
2539         s->manual_dirty_log_protect = dirty_log_manual_caps;
2540         if (dirty_log_manual_caps) {
2541             ret = kvm_vm_enable_cap(s, KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2, 0,
2542                                     dirty_log_manual_caps);
2543             if (ret) {
2544                 warn_report("Trying to enable capability %"PRIu64" of "
2545                             "KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 but failed. "
2546                             "Falling back to the legacy mode. ",
2547                             dirty_log_manual_caps);
2548                 s->manual_dirty_log_protect = 0;
2549             }
2550         }
2551     }
2552 
2553     return 0;
2554 }
2555 
2556 static int kvm_init(MachineState *ms)
2557 {
2558     MachineClass *mc = MACHINE_GET_CLASS(ms);
2559     static const char upgrade_note[] =
2560         "Please upgrade to at least kernel 2.6.29 or recent kvm-kmod\n"
2561         "(see http://sourceforge.net/projects/kvm).\n";
2562     const struct {
2563         const char *name;
2564         int num;
2565     } num_cpus[] = {
2566         { "SMP",          ms->smp.cpus },
2567         { "hotpluggable", ms->smp.max_cpus },
2568         { /* end of list */ }
2569     }, *nc = num_cpus;
2570     int soft_vcpus_limit, hard_vcpus_limit;
2571     KVMState *s;
2572     const KVMCapabilityInfo *missing_cap;
2573     int ret;
2574     int type;
2575 
2576     qemu_mutex_init(&kml_slots_lock);
2577 
2578     s = KVM_STATE(ms->accelerator);
2579 
2580     /*
2581      * On systems where the kernel can support different base page
2582      * sizes, host page size may be different from TARGET_PAGE_SIZE,
2583      * even with KVM.  TARGET_PAGE_SIZE is assumed to be the minimum
2584      * page size for the system though.
2585      */
2586     assert(TARGET_PAGE_SIZE <= qemu_real_host_page_size());
2587 
2588     s->sigmask_len = 8;
2589     accel_blocker_init();
2590 
2591 #ifdef TARGET_KVM_HAVE_GUEST_DEBUG
2592     QTAILQ_INIT(&s->kvm_sw_breakpoints);
2593 #endif
2594     QLIST_INIT(&s->kvm_parked_vcpus);
2595     s->fd = qemu_open_old(s->device ?: "/dev/kvm", O_RDWR);
2596     if (s->fd == -1) {
2597         error_report("Could not access KVM kernel module: %m");
2598         ret = -errno;
2599         goto err;
2600     }
2601 
2602     ret = kvm_ioctl(s, KVM_GET_API_VERSION, 0);
2603     if (ret < KVM_API_VERSION) {
2604         if (ret >= 0) {
2605             ret = -EINVAL;
2606         }
2607         error_report("kvm version too old");
2608         goto err;
2609     }
2610 
2611     if (ret > KVM_API_VERSION) {
2612         ret = -EINVAL;
2613         error_report("kvm version not supported");
2614         goto err;
2615     }
2616 
2617     kvm_immediate_exit = kvm_check_extension(s, KVM_CAP_IMMEDIATE_EXIT);
2618     s->nr_slots_max = kvm_check_extension(s, KVM_CAP_NR_MEMSLOTS);
2619 
2620     /* If unspecified, use the default value */
2621     if (!s->nr_slots_max) {
2622         s->nr_slots_max = KVM_MEMSLOTS_NR_MAX_DEFAULT;
2623     }
2624 
2625     type = find_kvm_machine_type(ms);
2626     if (type < 0) {
2627         ret = -EINVAL;
2628         goto err;
2629     }
2630 
2631     ret = do_kvm_create_vm(ms, type);
2632     if (ret < 0) {
2633         goto err;
2634     }
2635 
2636     s->vmfd = ret;
2637 
2638     s->nr_as = kvm_vm_check_extension(s, KVM_CAP_MULTI_ADDRESS_SPACE);
2639     if (s->nr_as <= 1) {
2640         s->nr_as = 1;
2641     }
2642     s->as = g_new0(struct KVMAs, s->nr_as);
2643 
2644     /* check the vcpu limits */
2645     soft_vcpus_limit = kvm_recommended_vcpus(s);
2646     hard_vcpus_limit = kvm_max_vcpus(s);
2647 
2648     while (nc->name) {
2649         if (nc->num > soft_vcpus_limit) {
2650             warn_report("Number of %s cpus requested (%d) exceeds "
2651                         "the recommended cpus supported by KVM (%d)",
2652                         nc->name, nc->num, soft_vcpus_limit);
2653 
2654             if (nc->num > hard_vcpus_limit) {
2655                 error_report("Number of %s cpus requested (%d) exceeds "
2656                              "the maximum cpus supported by KVM (%d)",
2657                              nc->name, nc->num, hard_vcpus_limit);
2658                 exit(1);
2659             }
2660         }
2661         nc++;
2662     }
2663 
2664     missing_cap = kvm_check_extension_list(s, kvm_required_capabilites);
2665     if (!missing_cap) {
2666         missing_cap =
2667             kvm_check_extension_list(s, kvm_arch_required_capabilities);
2668     }
2669     if (missing_cap) {
2670         ret = -EINVAL;
2671         error_report("kvm does not support %s", missing_cap->name);
2672         error_printf("%s", upgrade_note);
2673         goto err;
2674     }
2675 
2676     s->coalesced_mmio = kvm_check_extension(s, KVM_CAP_COALESCED_MMIO);
2677     s->coalesced_pio = s->coalesced_mmio &&
2678                        kvm_check_extension(s, KVM_CAP_COALESCED_PIO);
2679 
2680     ret = kvm_setup_dirty_ring(s);
2681     if (ret < 0) {
2682         goto err;
2683     }
2684 
2685 #ifdef KVM_CAP_VCPU_EVENTS
2686     s->vcpu_events = kvm_check_extension(s, KVM_CAP_VCPU_EVENTS);
2687 #endif
2688     s->max_nested_state_len = kvm_check_extension(s, KVM_CAP_NESTED_STATE);
2689 
2690     s->irq_set_ioctl = KVM_IRQ_LINE;
2691     if (kvm_check_extension(s, KVM_CAP_IRQ_INJECT_STATUS)) {
2692         s->irq_set_ioctl = KVM_IRQ_LINE_STATUS;
2693     }
2694 
2695     kvm_readonly_mem_allowed =
2696         (kvm_vm_check_extension(s, KVM_CAP_READONLY_MEM) > 0);
2697 
2698     kvm_resamplefds_allowed =
2699         (kvm_check_extension(s, KVM_CAP_IRQFD_RESAMPLE) > 0);
2700 
2701     kvm_vm_attributes_allowed =
2702         (kvm_check_extension(s, KVM_CAP_VM_ATTRIBUTES) > 0);
2703 
2704 #ifdef TARGET_KVM_HAVE_GUEST_DEBUG
2705     kvm_has_guest_debug =
2706         (kvm_check_extension(s, KVM_CAP_SET_GUEST_DEBUG) > 0);
2707 #endif
2708 
2709     kvm_sstep_flags = 0;
2710     if (kvm_has_guest_debug) {
2711         kvm_sstep_flags = SSTEP_ENABLE;
2712 
2713 #if defined TARGET_KVM_HAVE_GUEST_DEBUG
2714         int guest_debug_flags =
2715             kvm_check_extension(s, KVM_CAP_SET_GUEST_DEBUG2);
2716 
2717         if (guest_debug_flags & KVM_GUESTDBG_BLOCKIRQ) {
2718             kvm_sstep_flags |= SSTEP_NOIRQ;
2719         }
2720 #endif
2721     }
2722 
2723     kvm_state = s;
2724 
2725     ret = kvm_arch_init(ms, s);
2726     if (ret < 0) {
2727         goto err;
2728     }
2729 
2730     kvm_supported_memory_attributes = kvm_vm_check_extension(s, KVM_CAP_MEMORY_ATTRIBUTES);
2731     kvm_guest_memfd_supported =
2732         kvm_check_extension(s, KVM_CAP_GUEST_MEMFD) &&
2733         kvm_check_extension(s, KVM_CAP_USER_MEMORY2) &&
2734         (kvm_supported_memory_attributes & KVM_MEMORY_ATTRIBUTE_PRIVATE);
2735 
2736     if (s->kernel_irqchip_split == ON_OFF_AUTO_AUTO) {
2737         s->kernel_irqchip_split = mc->default_kernel_irqchip_split ? ON_OFF_AUTO_ON : ON_OFF_AUTO_OFF;
2738     }
2739 
2740     qemu_register_reset(kvm_unpoison_all, NULL);
2741     qemu_register_reset(kvm_reset_parked_vcpus, s);
2742 
2743     if (s->kernel_irqchip_allowed) {
2744         kvm_irqchip_create(s);
2745     }
2746 
2747     s->memory_listener.listener.eventfd_add = kvm_mem_ioeventfd_add;
2748     s->memory_listener.listener.eventfd_del = kvm_mem_ioeventfd_del;
2749     s->memory_listener.listener.coalesced_io_add = kvm_coalesce_mmio_region;
2750     s->memory_listener.listener.coalesced_io_del = kvm_uncoalesce_mmio_region;
2751 
2752     kvm_memory_listener_register(s, &s->memory_listener,
2753                                  &address_space_memory, 0, "kvm-memory");
2754     memory_listener_register(&kvm_io_listener,
2755                              &address_space_io);
2756 
2757     s->sync_mmu = !!kvm_vm_check_extension(kvm_state, KVM_CAP_SYNC_MMU);
2758     if (!s->sync_mmu) {
2759         ret = ram_block_discard_disable(true);
2760         assert(!ret);
2761     }
2762 
2763     if (s->kvm_dirty_ring_size) {
2764         kvm_dirty_ring_reaper_init(s);
2765     }
2766 
2767     if (kvm_check_extension(kvm_state, KVM_CAP_BINARY_STATS_FD)) {
2768         add_stats_callbacks(STATS_PROVIDER_KVM, query_stats_cb,
2769                             query_stats_schemas_cb);
2770     }
2771 
2772     return 0;
2773 
2774 err:
2775     assert(ret < 0);
2776     if (s->vmfd >= 0) {
2777         close(s->vmfd);
2778     }
2779     if (s->fd != -1) {
2780         close(s->fd);
2781     }
2782     g_free(s->as);
2783     g_free(s->memory_listener.slots);
2784 
2785     return ret;
2786 }
2787 
2788 void kvm_set_sigmask_len(KVMState *s, unsigned int sigmask_len)
2789 {
2790     s->sigmask_len = sigmask_len;
2791 }
2792 
2793 static void kvm_handle_io(uint16_t port, MemTxAttrs attrs, void *data, int direction,
2794                           int size, uint32_t count)
2795 {
2796     int i;
2797     uint8_t *ptr = data;
2798 
2799     for (i = 0; i < count; i++) {
2800         address_space_rw(&address_space_io, port, attrs,
2801                          ptr, size,
2802                          direction == KVM_EXIT_IO_OUT);
2803         ptr += size;
2804     }
2805 }
2806 
2807 static int kvm_handle_internal_error(CPUState *cpu, struct kvm_run *run)
2808 {
2809     int i;
2810 
2811     fprintf(stderr, "KVM internal error. Suberror: %d\n",
2812             run->internal.suberror);
2813 
2814     for (i = 0; i < run->internal.ndata; ++i) {
2815         fprintf(stderr, "extra data[%d]: 0x%016"PRIx64"\n",
2816                 i, (uint64_t)run->internal.data[i]);
2817     }
2818     if (run->internal.suberror == KVM_INTERNAL_ERROR_EMULATION) {
2819         fprintf(stderr, "emulation failure\n");
2820         if (!kvm_arch_stop_on_emulation_error(cpu)) {
2821             cpu_dump_state(cpu, stderr, CPU_DUMP_CODE);
2822             return EXCP_INTERRUPT;
2823         }
2824     }
2825     /* FIXME: Should trigger a qmp message to let management know
2826      * something went wrong.
2827      */
2828     return -1;
2829 }
2830 
2831 void kvm_flush_coalesced_mmio_buffer(void)
2832 {
2833     KVMState *s = kvm_state;
2834 
2835     if (!s || s->coalesced_flush_in_progress) {
2836         return;
2837     }
2838 
2839     s->coalesced_flush_in_progress = true;
2840 
2841     if (s->coalesced_mmio_ring) {
2842         struct kvm_coalesced_mmio_ring *ring = s->coalesced_mmio_ring;
2843         while (ring->first != ring->last) {
2844             struct kvm_coalesced_mmio *ent;
2845 
2846             ent = &ring->coalesced_mmio[ring->first];
2847 
2848             if (ent->pio == 1) {
2849                 address_space_write(&address_space_io, ent->phys_addr,
2850                                     MEMTXATTRS_UNSPECIFIED, ent->data,
2851                                     ent->len);
2852             } else {
2853                 cpu_physical_memory_write(ent->phys_addr, ent->data, ent->len);
2854             }
2855             smp_wmb();
2856             ring->first = (ring->first + 1) % KVM_COALESCED_MMIO_MAX;
2857         }
2858     }
2859 
2860     s->coalesced_flush_in_progress = false;
2861 }
2862 
2863 static void do_kvm_cpu_synchronize_state(CPUState *cpu, run_on_cpu_data arg)
2864 {
2865     if (!cpu->vcpu_dirty && !kvm_state->guest_state_protected) {
2866         Error *err = NULL;
2867         int ret = kvm_arch_get_registers(cpu, &err);
2868         if (ret) {
2869             if (err) {
2870                 error_reportf_err(err, "Failed to synchronize CPU state: ");
2871             } else {
2872                 error_report("Failed to get registers: %s", strerror(-ret));
2873             }
2874 
2875             cpu_dump_state(cpu, stderr, CPU_DUMP_CODE);
2876             vm_stop(RUN_STATE_INTERNAL_ERROR);
2877         }
2878 
2879         cpu->vcpu_dirty = true;
2880     }
2881 }
2882 
2883 void kvm_cpu_synchronize_state(CPUState *cpu)
2884 {
2885     if (!cpu->vcpu_dirty && !kvm_state->guest_state_protected) {
2886         run_on_cpu(cpu, do_kvm_cpu_synchronize_state, RUN_ON_CPU_NULL);
2887     }
2888 }
2889 
2890 static void do_kvm_cpu_synchronize_post_reset(CPUState *cpu, run_on_cpu_data arg)
2891 {
2892     Error *err = NULL;
2893     int ret = kvm_arch_put_registers(cpu, KVM_PUT_RESET_STATE, &err);
2894     if (ret) {
2895         if (err) {
2896             error_reportf_err(err, "Restoring resisters after reset: ");
2897         } else {
2898             error_report("Failed to put registers after reset: %s",
2899                          strerror(-ret));
2900         }
2901         cpu_dump_state(cpu, stderr, CPU_DUMP_CODE);
2902         vm_stop(RUN_STATE_INTERNAL_ERROR);
2903     }
2904 
2905     cpu->vcpu_dirty = false;
2906 }
2907 
2908 void kvm_cpu_synchronize_post_reset(CPUState *cpu)
2909 {
2910     run_on_cpu(cpu, do_kvm_cpu_synchronize_post_reset, RUN_ON_CPU_NULL);
2911 }
2912 
2913 static void do_kvm_cpu_synchronize_post_init(CPUState *cpu, run_on_cpu_data arg)
2914 {
2915     Error *err = NULL;
2916     int ret = kvm_arch_put_registers(cpu, KVM_PUT_FULL_STATE, &err);
2917     if (ret) {
2918         if (err) {
2919             error_reportf_err(err, "Putting registers after init: ");
2920         } else {
2921             error_report("Failed to put registers after init: %s",
2922                          strerror(-ret));
2923         }
2924         exit(1);
2925     }
2926 
2927     cpu->vcpu_dirty = false;
2928 }
2929 
2930 void kvm_cpu_synchronize_post_init(CPUState *cpu)
2931 {
2932     if (!kvm_state->guest_state_protected) {
2933         /*
2934          * This runs before the machine_init_done notifiers, and is the last
2935          * opportunity to synchronize the state of confidential guests.
2936          */
2937         run_on_cpu(cpu, do_kvm_cpu_synchronize_post_init, RUN_ON_CPU_NULL);
2938     }
2939 }
2940 
2941 static void do_kvm_cpu_synchronize_pre_loadvm(CPUState *cpu, run_on_cpu_data arg)
2942 {
2943     cpu->vcpu_dirty = true;
2944 }
2945 
2946 void kvm_cpu_synchronize_pre_loadvm(CPUState *cpu)
2947 {
2948     run_on_cpu(cpu, do_kvm_cpu_synchronize_pre_loadvm, RUN_ON_CPU_NULL);
2949 }
2950 
2951 #ifdef KVM_HAVE_MCE_INJECTION
2952 static __thread void *pending_sigbus_addr;
2953 static __thread int pending_sigbus_code;
2954 static __thread bool have_sigbus_pending;
2955 #endif
2956 
2957 static void kvm_cpu_kick(CPUState *cpu)
2958 {
2959     qatomic_set(&cpu->kvm_run->immediate_exit, 1);
2960 }
2961 
2962 static void kvm_cpu_kick_self(void)
2963 {
2964     if (kvm_immediate_exit) {
2965         kvm_cpu_kick(current_cpu);
2966     } else {
2967         qemu_cpu_kick_self();
2968     }
2969 }
2970 
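/*
 * Drain any pending SIG_IPI.  With KVM_CAP_IMMEDIATE_EXIT there is no
 * signal to consume: clearing immediate_exit (with the barrier pairing
 * against kvm_cpu_exec) is all that is needed.  Otherwise, loop on
 * sigtimedwait() until SIG_IPI is no longer pending.
 */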
2971 static void kvm_eat_signals(CPUState *cpu)
2972 {
2973     struct timespec ts = { 0, 0 };
2974     siginfo_t siginfo;
2975     sigset_t waitset;
2976     sigset_t chkset;
2977     int r;
2978 
2979     if (kvm_immediate_exit) {
2980         qatomic_set(&cpu->kvm_run->immediate_exit, 0);
2981         /* Write kvm_run->immediate_exit before the cpu->exit_request
2982          * write in kvm_cpu_exec.
2983          */
2984         smp_wmb();
2985         return;
2986     }
2987 
2988     sigemptyset(&waitset);
2989     sigaddset(&waitset, SIG_IPI);
2990 
2991     do {
2992         r = sigtimedwait(&waitset, &siginfo, &ts);
2993         if (r == -1 && !(errno == EAGAIN || errno == EINTR)) {
2994             perror("sigtimedwait");
2995             exit(1);
2996         }
2997 
2998         r = sigpending(&chkset);
2999         if (r == -1) {
3000             perror("sigpending");
3001             exit(1);
3002         }
3003     } while (sigismember(&chkset, SIG_IPI));
3004 }
3005 
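/*
 * Flip a guest-physical range between private and shared for
 * guest_memfd-backed memory: update the KVM memory attributes first,
 * then discard the backing that is no longer in use (ordinary RAM when
 * converting to private, the guest_memfd range when converting to
 * shared).
 */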
3006 int kvm_convert_memory(hwaddr start, hwaddr size, bool to_private)
3007 {
3008     MemoryRegionSection section;
3009     ram_addr_t offset;
3010     MemoryRegion *mr;
3011     RAMBlock *rb;
3012     void *addr;
3013     int ret = -1;
3014 
3015     trace_kvm_convert_memory(start, size, to_private ? "shared_to_private" : "private_to_shared");
3016 
3017     if (!QEMU_PTR_IS_ALIGNED(start, qemu_real_host_page_size()) ||
3018         !QEMU_PTR_IS_ALIGNED(size, qemu_real_host_page_size())) {
3019         return -1;
3020     }
3021 
3022     if (!size) {
3023         return -1;
3024     }
3025 
3026     section = memory_region_find(get_system_memory(), start, size);
3027     mr = section.mr;
3028     if (!mr) {
3029         /*
3030          * Ignore conversion of a non-assigned region to shared.
3031          *
3032          * TDX requires the vMMIO region to be shared in order to inject #VE
3033          * into the guest.  OVMF conservatively issues MapGPA(shared) on the
3034          * 32bit PCI MMIO region and on the vIO-APIC 0xFEC00000 4K page.
3035          * OVMF assigns the 32bit PCI MMIO region to
3036          * [top of low memory: typically 2GB=0xC000000,  0xFC00000)
3037          */
3038         if (!to_private) {
3039             return 0;
3040         }
3041         return -1;
3042     }
3043 
3044     if (!memory_region_has_guest_memfd(mr)) {
3045         /*
3046          * Because vMMIO region must be shared, guest TD may convert vMMIO
3047          * region to shared explicitly.  Don't complain such case.  See
3048          * memory_region_type() for checking if the region is MMIO region.
3049          */
3050         if (!to_private &&
3051             !memory_region_is_ram(mr) &&
3052             !memory_region_is_ram_device(mr) &&
3053             !memory_region_is_rom(mr) &&
3054             !memory_region_is_romd(mr)) {
3055             ret = 0;
3056         } else {
3057             error_report("Convert non guest_memfd backed memory region "
3058                         "(0x%"HWADDR_PRIx" ,+ 0x%"HWADDR_PRIx") to %s",
3059                         start, size, to_private ? "private" : "shared");
3060         }
3061         goto out_unref;
3062     }
3063 
3064     if (to_private) {
3065         ret = kvm_set_memory_attributes_private(start, size);
3066     } else {
3067         ret = kvm_set_memory_attributes_shared(start, size);
3068     }
3069     if (ret) {
3070         goto out_unref;
3071     }
3072 
3073     addr = memory_region_get_ram_ptr(mr) + section.offset_within_region;
3074     rb = qemu_ram_block_from_host(addr, false, &offset);
3075 
3076     if (to_private) {
3077         if (rb->page_size != qemu_real_host_page_size()) {
3078             /*
3079              * shared memory is backed by hugetlb, which is supposed to be
3080              * pre-allocated and doesn't need to be discarded
3081              */
3082             goto out_unref;
3083         }
3084         ret = ram_block_discard_range(rb, offset, size);
3085     } else {
3086         ret = ram_block_discard_guest_memfd_range(rb, offset, size);
3087     }
3088 
3089 out_unref:
3090     memory_region_unref(mr);
3091     return ret;
3092 }
3093 
3094 int kvm_cpu_exec(CPUState *cpu)
3095 {
3096     struct kvm_run *run = cpu->kvm_run;
3097     int ret, run_ret;
3098 
3099     trace_kvm_cpu_exec();
3100 
3101     if (kvm_arch_process_async_events(cpu)) {
3102         qatomic_set(&cpu->exit_request, 0);
3103         return EXCP_HLT;
3104     }
3105 
3106     bql_unlock();
3107     cpu_exec_start(cpu);
3108 
3109     do {
3110         MemTxAttrs attrs;
3111 
3112         if (cpu->vcpu_dirty) {
3113             Error *err = NULL;
3114             ret = kvm_arch_put_registers(cpu, KVM_PUT_RUNTIME_STATE, &err);
3115             if (ret) {
3116                 if (err) {
3117                     error_reportf_err(err, "Putting registers after init: ");
3118                 } else {
3119                     error_report("Failed to put registers after init: %s",
3120                                  strerror(-ret));
3121                 }
3122                 ret = -1;
3123                 break;
3124             }
3125 
3126             cpu->vcpu_dirty = false;
3127         }
3128 
3129         kvm_arch_pre_run(cpu, run);
3130         if (qatomic_read(&cpu->exit_request)) {
3131             trace_kvm_interrupt_exit_request();
3132             /*
3133              * KVM requires us to reenter the kernel after IO exits to complete
3134              * instruction emulation. This self-signal will ensure that we
3135              * leave ASAP again.
3136              */
3137             kvm_cpu_kick_self();
3138         }
3139 
3140         /* Read cpu->exit_request before KVM_RUN reads run->immediate_exit.
3141          * Matching barrier in kvm_eat_signals.
3142          */
3143         smp_rmb();
3144 
3145         run_ret = kvm_vcpu_ioctl(cpu, KVM_RUN, 0);
3146 
3147         attrs = kvm_arch_post_run(cpu, run);
3148 
3149 #ifdef KVM_HAVE_MCE_INJECTION
3150         if (unlikely(have_sigbus_pending)) {
3151             bql_lock();
3152             kvm_arch_on_sigbus_vcpu(cpu, pending_sigbus_code,
3153                                     pending_sigbus_addr);
3154             have_sigbus_pending = false;
3155             bql_unlock();
3156         }
3157 #endif
3158 
3159         if (run_ret < 0) {
3160             if (run_ret == -EINTR || run_ret == -EAGAIN) {
3161                 trace_kvm_io_window_exit();
3162                 kvm_eat_signals(cpu);
3163                 ret = EXCP_INTERRUPT;
3164                 break;
3165             }
3166             if (!(run_ret == -EFAULT && run->exit_reason == KVM_EXIT_MEMORY_FAULT)) {
3167                 fprintf(stderr, "error: kvm run failed %s\n",
3168                         strerror(-run_ret));
3169 #ifdef TARGET_PPC
3170                 if (run_ret == -EBUSY) {
3171                     fprintf(stderr,
3172                             "This is probably because your SMT is enabled.\n"
3173                             "VCPU can only run on primary threads with all "
3174                             "secondary threads offline.\n");
3175                 }
3176 #endif
3177                 ret = -1;
3178                 break;
3179             }
3180         }
3181 
3182         trace_kvm_run_exit(cpu->cpu_index, run->exit_reason);
3183         switch (run->exit_reason) {
3184         case KVM_EXIT_IO:
3185             /* Called outside BQL */
3186             kvm_handle_io(run->io.port, attrs,
3187                           (uint8_t *)run + run->io.data_offset,
3188                           run->io.direction,
3189                           run->io.size,
3190                           run->io.count);
3191             ret = 0;
3192             break;
3193         case KVM_EXIT_MMIO:
3194             /* Called outside BQL */
3195             address_space_rw(&address_space_memory,
3196                              run->mmio.phys_addr, attrs,
3197                              run->mmio.data,
3198                              run->mmio.len,
3199                              run->mmio.is_write);
3200             ret = 0;
3201             break;
3202         case KVM_EXIT_IRQ_WINDOW_OPEN:
3203             ret = EXCP_INTERRUPT;
3204             break;
3205         case KVM_EXIT_SHUTDOWN:
3206             qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
3207             ret = EXCP_INTERRUPT;
3208             break;
3209         case KVM_EXIT_UNKNOWN:
3210             fprintf(stderr, "KVM: unknown exit, hardware reason %" PRIx64 "\n",
3211                     (uint64_t)run->hw.hardware_exit_reason);
3212             ret = -1;
3213             break;
3214         case KVM_EXIT_INTERNAL_ERROR:
3215             ret = kvm_handle_internal_error(cpu, run);
3216             break;
3217         case KVM_EXIT_DIRTY_RING_FULL:
3218             /*
3219              * We shouldn't continue while the dirty ring of this vcpu is
3220              * still full; it is drained by KVM_RESET_DIRTY_RINGS during reaping.
3221              */
3222             trace_kvm_dirty_ring_full(cpu->cpu_index);
3223             bql_lock();
3224             /*
3225          * We throttle the vCPU by making it sleep once it exits from the
3226          * kernel due to a full dirty ring. In the dirtylimit scenario,
3227          * reaping all vCPUs after a single vCPU's ring fills would skip
3228          * that sleep, so only reap the vCPU whose ring is full.
3229              */
3230             if (dirtylimit_in_service()) {
3231                 kvm_dirty_ring_reap(kvm_state, cpu);
3232             } else {
3233                 kvm_dirty_ring_reap(kvm_state, NULL);
3234             }
3235             bql_unlock();
3236             dirtylimit_vcpu_execute(cpu);
3237             ret = 0;
3238             break;
3239         case KVM_EXIT_SYSTEM_EVENT:
3240             trace_kvm_run_exit_system_event(cpu->cpu_index, run->system_event.type);
3241             switch (run->system_event.type) {
3242             case KVM_SYSTEM_EVENT_SHUTDOWN:
3243                 qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN);
3244                 ret = EXCP_INTERRUPT;
3245                 break;
3246             case KVM_SYSTEM_EVENT_RESET:
3247                 qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
3248                 ret = EXCP_INTERRUPT;
3249                 break;
3250             case KVM_SYSTEM_EVENT_CRASH:
3251                 kvm_cpu_synchronize_state(cpu);
3252                 bql_lock();
3253                 qemu_system_guest_panicked(cpu_get_crash_info(cpu));
3254                 bql_unlock();
3255                 ret = 0;
3256                 break;
3257             default:
3258                 ret = kvm_arch_handle_exit(cpu, run);
3259                 break;
3260             }
3261             break;
3262         case KVM_EXIT_MEMORY_FAULT:
3263             trace_kvm_memory_fault(run->memory_fault.gpa,
3264                                    run->memory_fault.size,
3265                                    run->memory_fault.flags);
3266             if (run->memory_fault.flags & ~KVM_MEMORY_EXIT_FLAG_PRIVATE) {
3267                 error_report("KVM_EXIT_MEMORY_FAULT: Unknown flag 0x%" PRIx64,
3268                              (uint64_t)run->memory_fault.flags);
3269                 ret = -1;
3270                 break;
3271             }
3272             ret = kvm_convert_memory(run->memory_fault.gpa, run->memory_fault.size,
3273                                      run->memory_fault.flags & KVM_MEMORY_EXIT_FLAG_PRIVATE);
3274             break;
3275         default:
3276             ret = kvm_arch_handle_exit(cpu, run);
3277             break;
3278         }
3279     } while (ret == 0);
3280 
3281     cpu_exec_end(cpu);
3282     bql_lock();
3283 
3284     if (ret < 0) {
3285         cpu_dump_state(cpu, stderr, CPU_DUMP_CODE);
3286         vm_stop(RUN_STATE_INTERNAL_ERROR);
3287     }
3288 
3289     qatomic_set(&cpu->exit_request, 0);
3290     return ret;
3291 }
3292 
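/*
 * The kvm_*_ioctl() helpers below are thin wrappers around ioctl(2) for
 * the system, VM, vCPU and device file descriptors.  Unlike raw
 * ioctl(), they return -errno on failure instead of -1, so a caller can
 * test the result directly.  A minimal, illustrative use (hypothetical
 * caller, shown only as a sketch):
 *
 *     ret = kvm_vm_ioctl(s, KVM_CHECK_EXTENSION, KVM_CAP_IOEVENTFD);
 *     if (ret < 0) {
 *         ... ret holds -errno ...
 *     }
 */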
3293 int kvm_ioctl(KVMState *s, unsigned long type, ...)
3294 {
3295     int ret;
3296     void *arg;
3297     va_list ap;
3298 
3299     va_start(ap, type);
3300     arg = va_arg(ap, void *);
3301     va_end(ap);
3302 
3303     trace_kvm_ioctl(type, arg);
3304     ret = ioctl(s->fd, type, arg);
3305     if (ret == -1) {
3306         ret = -errno;
3307     }
3308     return ret;
3309 }
3310 
3311 int kvm_vm_ioctl(KVMState *s, unsigned long type, ...)
3312 {
3313     int ret;
3314     void *arg;
3315     va_list ap;
3316 
3317     va_start(ap, type);
3318     arg = va_arg(ap, void *);
3319     va_end(ap);
3320 
3321     trace_kvm_vm_ioctl(type, arg);
3322     accel_ioctl_begin();
3323     ret = ioctl(s->vmfd, type, arg);
3324     accel_ioctl_end();
3325     if (ret == -1) {
3326         ret = -errno;
3327     }
3328     return ret;
3329 }
3330 
3331 int kvm_vcpu_ioctl(CPUState *cpu, unsigned long type, ...)
3332 {
3333     int ret;
3334     void *arg;
3335     va_list ap;
3336 
3337     va_start(ap, type);
3338     arg = va_arg(ap, void *);
3339     va_end(ap);
3340 
3341     trace_kvm_vcpu_ioctl(cpu->cpu_index, type, arg);
3342     accel_cpu_ioctl_begin(cpu);
3343     ret = ioctl(cpu->kvm_fd, type, arg);
3344     accel_cpu_ioctl_end(cpu);
3345     if (ret == -1) {
3346         ret = -errno;
3347     }
3348     return ret;
3349 }
3350 
3351 int kvm_device_ioctl(int fd, unsigned long type, ...)
3352 {
3353     int ret;
3354     void *arg;
3355     va_list ap;
3356 
3357     va_start(ap, type);
3358     arg = va_arg(ap, void *);
3359     va_end(ap);
3360 
3361     trace_kvm_device_ioctl(fd, type, arg);
3362     accel_ioctl_begin();
3363     ret = ioctl(fd, type, arg);
3364     accel_ioctl_end();
3365     if (ret == -1) {
3366         ret = -errno;
3367     }
3368     return ret;
3369 }
3370 
3371 int kvm_vm_check_attr(KVMState *s, uint32_t group, uint64_t attr)
3372 {
3373     int ret;
3374     struct kvm_device_attr attribute = {
3375         .group = group,
3376         .attr = attr,
3377     };
3378 
3379     if (!kvm_vm_attributes_allowed) {
3380         return 0;
3381     }
3382 
3383     ret = kvm_vm_ioctl(s, KVM_HAS_DEVICE_ATTR, &attribute);
3384     /* kvm returns 0 on success for HAS_DEVICE_ATTR */
3385     return ret ? 0 : 1;
3386 }
3387 
3388 int kvm_device_check_attr(int dev_fd, uint32_t group, uint64_t attr)
3389 {
3390     struct kvm_device_attr attribute = {
3391         .group = group,
3392         .attr = attr,
3393         .flags = 0,
3394     };
3395 
3396     return kvm_device_ioctl(dev_fd, KVM_HAS_DEVICE_ATTR, &attribute) ? 0 : 1;
3397 }
3398 
3399 int kvm_device_access(int fd, int group, uint64_t attr,
3400                       void *val, bool write, Error **errp)
3401 {
3402     struct kvm_device_attr kvmattr;
3403     int err;
3404 
3405     kvmattr.flags = 0;
3406     kvmattr.group = group;
3407     kvmattr.attr = attr;
3408     kvmattr.addr = (uintptr_t)val;
3409 
3410     err = kvm_device_ioctl(fd,
3411                            write ? KVM_SET_DEVICE_ATTR : KVM_GET_DEVICE_ATTR,
3412                            &kvmattr);
3413     if (err < 0) {
3414         error_setg_errno(errp, -err,
3415                          "KVM_%s_DEVICE_ATTR failed: Group %d "
3416                          "attr 0x%016" PRIx64,
3417                          write ? "SET" : "GET", group, attr);
3418     }
3419     return err;
3420 }
3421 
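/*
 * Illustrative use of kvm_device_access() above (the group and
 * attribute values are hypothetical, shown only as a sketch):
 *
 *     uint64_t val;
 *     kvm_device_access(dev_fd, group, attr, &val, false, &error_abort);
 *
 * 'write' selects between KVM_SET_DEVICE_ATTR (true) and
 * KVM_GET_DEVICE_ATTR (false); the kernel copies to or from 'val'.
 */
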
3422 bool kvm_has_sync_mmu(void)
3423 {
3424     return kvm_state->sync_mmu;
3425 }
3426 
3427 int kvm_has_vcpu_events(void)
3428 {
3429     return kvm_state->vcpu_events;
3430 }
3431 
3432 int kvm_max_nested_state_length(void)
3433 {
3434     return kvm_state->max_nested_state_len;
3435 }
3436 
3437 int kvm_has_gsi_routing(void)
3438 {
3439 #ifdef KVM_CAP_IRQ_ROUTING
3440     return kvm_check_extension(kvm_state, KVM_CAP_IRQ_ROUTING);
3441 #else
3442     return false;
3443 #endif
3444 }
3445 
3446 bool kvm_arm_supports_user_irq(void)
3447 {
3448     return kvm_check_extension(kvm_state, KVM_CAP_ARM_USER_IRQ);
3449 }
3450 
3451 #ifdef TARGET_KVM_HAVE_GUEST_DEBUG
3452 struct kvm_sw_breakpoint *kvm_find_sw_breakpoint(CPUState *cpu, vaddr pc)
3453 {
3454     struct kvm_sw_breakpoint *bp;
3455 
3456     QTAILQ_FOREACH(bp, &cpu->kvm_state->kvm_sw_breakpoints, entry) {
3457         if (bp->pc == pc) {
3458             return bp;
3459         }
3460     }
3461     return NULL;
3462 }
3463 
3464 int kvm_sw_breakpoints_active(CPUState *cpu)
3465 {
3466     return !QTAILQ_EMPTY(&cpu->kvm_state->kvm_sw_breakpoints);
3467 }
3468 
3469 struct kvm_set_guest_debug_data {
3470     struct kvm_guest_debug dbg;
3471     int err;
3472 };
3473 
3474 static void kvm_invoke_set_guest_debug(CPUState *cpu, run_on_cpu_data data)
3475 {
3476     struct kvm_set_guest_debug_data *dbg_data =
3477         (struct kvm_set_guest_debug_data *) data.host_ptr;
3478 
3479     dbg_data->err = kvm_vcpu_ioctl(cpu, KVM_SET_GUEST_DEBUG,
3480                                    &dbg_data->dbg);
3481 }
3482 
3483 int kvm_update_guest_debug(CPUState *cpu, unsigned long reinject_trap)
3484 {
3485     struct kvm_set_guest_debug_data data;
3486 
3487     data.dbg.control = reinject_trap;
3488 
3489     if (cpu->singlestep_enabled) {
3490         data.dbg.control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP;
3491 
3492         if (cpu->singlestep_enabled & SSTEP_NOIRQ) {
3493             data.dbg.control |= KVM_GUESTDBG_BLOCKIRQ;
3494         }
3495     }
3496     kvm_arch_update_guest_debug(cpu, &data.dbg);
3497 
3498     run_on_cpu(cpu, kvm_invoke_set_guest_debug,
3499                RUN_ON_CPU_HOST_PTR(&data));
3500     return data.err;
3501 }
3502 
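/*
 * kvm_update_guest_debug() above funnels the KVM_SET_GUEST_DEBUG ioctl
 * through run_on_cpu() so that it executes on the target vCPU's own
 * thread, which is where KVM expects vCPU ioctls to be issued.
 */
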
3503 bool kvm_supports_guest_debug(void)
3504 {
3505     /* probed during kvm_init() */
3506     return kvm_has_guest_debug;
3507 }
3508 
3509 int kvm_insert_breakpoint(CPUState *cpu, int type, vaddr addr, vaddr len)
3510 {
3511     struct kvm_sw_breakpoint *bp;
3512     int err;
3513 
3514     if (type == GDB_BREAKPOINT_SW) {
3515         bp = kvm_find_sw_breakpoint(cpu, addr);
3516         if (bp) {
3517             bp->use_count++;
3518             return 0;
3519         }
3520 
3521         bp = g_new(struct kvm_sw_breakpoint, 1);
3522         bp->pc = addr;
3523         bp->use_count = 1;
3524         err = kvm_arch_insert_sw_breakpoint(cpu, bp);
3525         if (err) {
3526             g_free(bp);
3527             return err;
3528         }
3529 
3530         QTAILQ_INSERT_HEAD(&cpu->kvm_state->kvm_sw_breakpoints, bp, entry);
3531     } else {
3532         err = kvm_arch_insert_hw_breakpoint(addr, len, type);
3533         if (err) {
3534             return err;
3535         }
3536     }
3537 
3538     CPU_FOREACH(cpu) {
3539         err = kvm_update_guest_debug(cpu, 0);
3540         if (err) {
3541             return err;
3542         }
3543     }
3544     return 0;
3545 }
3546 
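/*
 * Software breakpoints are refcounted: inserting an address that is
 * already patched just bumps use_count above, and kvm_remove_breakpoint()
 * below only unpatches once the count drops to zero.  After any change,
 * every vCPU's debug state is refreshed via kvm_update_guest_debug().
 */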
3547 int kvm_remove_breakpoint(CPUState *cpu, int type, vaddr addr, vaddr len)
3548 {
3549     struct kvm_sw_breakpoint *bp;
3550     int err;
3551 
3552     if (type == GDB_BREAKPOINT_SW) {
3553         bp = kvm_find_sw_breakpoint(cpu, addr);
3554         if (!bp) {
3555             return -ENOENT;
3556         }
3557 
3558         if (bp->use_count > 1) {
3559             bp->use_count--;
3560             return 0;
3561         }
3562 
3563         err = kvm_arch_remove_sw_breakpoint(cpu, bp);
3564         if (err) {
3565             return err;
3566         }
3567 
3568         QTAILQ_REMOVE(&cpu->kvm_state->kvm_sw_breakpoints, bp, entry);
3569         g_free(bp);
3570     } else {
3571         err = kvm_arch_remove_hw_breakpoint(addr, len, type);
3572         if (err) {
3573             return err;
3574         }
3575     }
3576 
3577     CPU_FOREACH(cpu) {
3578         err = kvm_update_guest_debug(cpu, 0);
3579         if (err) {
3580             return err;
3581         }
3582     }
3583     return 0;
3584 }
3585 
3586 void kvm_remove_all_breakpoints(CPUState *cpu)
3587 {
3588     struct kvm_sw_breakpoint *bp, *next;
3589     KVMState *s = cpu->kvm_state;
3590     CPUState *tmpcpu;
3591 
3592     QTAILQ_FOREACH_SAFE(bp, &s->kvm_sw_breakpoints, entry, next) {
3593         if (kvm_arch_remove_sw_breakpoint(cpu, bp) != 0) {
3594             /* Try harder to find a CPU that currently sees the breakpoint. */
3595             CPU_FOREACH(tmpcpu) {
3596                 if (kvm_arch_remove_sw_breakpoint(tmpcpu, bp) == 0) {
3597                     break;
3598                 }
3599             }
3600         }
3601         QTAILQ_REMOVE(&s->kvm_sw_breakpoints, bp, entry);
3602         g_free(bp);
3603     }
3604     kvm_arch_remove_all_hw_breakpoints();
3605 
3606     CPU_FOREACH(cpu) {
3607         kvm_update_guest_debug(cpu, 0);
3608     }
3609 }
3610 
3611 #endif /* TARGET_KVM_HAVE_GUEST_DEBUG */
3612 
3613 static int kvm_set_signal_mask(CPUState *cpu, const sigset_t *sigset)
3614 {
3615     KVMState *s = kvm_state;
3616     struct kvm_signal_mask *sigmask;
3617     int r;
3618 
3619     sigmask = g_malloc(sizeof(*sigmask) + sizeof(*sigset));
3620 
3621     sigmask->len = s->sigmask_len;
3622     memcpy(sigmask->sigset, sigset, sizeof(*sigset));
3623     r = kvm_vcpu_ioctl(cpu, KVM_SET_SIGNAL_MASK, sigmask);
3624     g_free(sigmask);
3625 
3626     return r;
3627 }
3628 
3629 static void kvm_ipi_signal(int sig)
3630 {
3631     if (current_cpu) {
3632         assert(kvm_immediate_exit);
3633         kvm_cpu_kick(current_cpu);
3634     }
3635 }
3636 
3637 void kvm_init_cpu_signals(CPUState *cpu)
3638 {
3639     int r;
3640     sigset_t set;
3641     struct sigaction sigact;
3642 
3643     memset(&sigact, 0, sizeof(sigact));
3644     sigact.sa_handler = kvm_ipi_signal;
3645     sigaction(SIG_IPI, &sigact, NULL);
3646 
3647     pthread_sigmask(SIG_BLOCK, NULL, &set);
3648 #if defined KVM_HAVE_MCE_INJECTION
3649     sigdelset(&set, SIGBUS);
3650     pthread_sigmask(SIG_SETMASK, &set, NULL);
3651 #endif
3652     sigdelset(&set, SIG_IPI);
3653     if (kvm_immediate_exit) {
3654         r = pthread_sigmask(SIG_SETMASK, &set, NULL);
3655     } else {
3656         r = kvm_set_signal_mask(cpu, &set);
3657     }
3658     if (r) {
3659         fprintf(stderr, "kvm_set_signal_mask: %s\n", strerror(-r));
3660         exit(1);
3661     }
3662 }
3663 
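/*
 * Note on the setup above: with KVM_CAP_IMMEDIATE_EXIT (kvm_immediate_exit),
 * SIG_IPI stays unblocked and kvm_ipi_signal() kicks the vCPU, making the
 * next KVM_RUN return immediately; on older kernels, the unblocked mask is
 * instead installed with KVM_SET_SIGNAL_MASK, so SIG_IPI is delivered only
 * while the vCPU sits inside KVM_RUN.
 */
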
3664 /* Called asynchronously in VCPU thread.  */
3665 int kvm_on_sigbus_vcpu(CPUState *cpu, int code, void *addr)
3666 {
3667 #ifdef KVM_HAVE_MCE_INJECTION
3668     if (have_sigbus_pending) {
3669         return 1;
3670     }
3671     have_sigbus_pending = true;
3672     pending_sigbus_addr = addr;
3673     pending_sigbus_code = code;
3674     qatomic_set(&cpu->exit_request, 1);
3675     return 0;
3676 #else
3677     return 1;
3678 #endif
3679 }
3680 
3681 /* Called synchronously (via signalfd) in main thread.  */
3682 int kvm_on_sigbus(int code, void *addr)
3683 {
3684 #ifdef KVM_HAVE_MCE_INJECTION
3685     /* An action-required MCE kills the process if SIGBUS is blocked.  Because
3686      * SIGBUS is blocked in the I/O thread, where we handle MCEs via signalfd,
3687      * we can only get an action-optional MCE here.
3688      */
3689     assert(code != BUS_MCEERR_AR);
3690     kvm_arch_on_sigbus_vcpu(first_cpu, code, addr);
3691     return 0;
3692 #else
3693     return 1;
3694 #endif
3695 }
3696 
3697 int kvm_create_device(KVMState *s, uint64_t type, bool test)
3698 {
3699     int ret;
3700     struct kvm_create_device create_dev;
3701 
3702     create_dev.type = type;
3703     create_dev.fd = -1;
3704     create_dev.flags = test ? KVM_CREATE_DEVICE_TEST : 0;
3705 
3706     if (!kvm_check_extension(s, KVM_CAP_DEVICE_CTRL)) {
3707         return -ENOTSUP;
3708     }
3709 
3710     ret = kvm_vm_ioctl(s, KVM_CREATE_DEVICE, &create_dev);
3711     if (ret) {
3712         return ret;
3713     }
3714 
3715     return test ? 0 : create_dev.fd;
3716 }
3717 
3718 bool kvm_device_supported(int vmfd, uint64_t type)
3719 {
3720     struct kvm_create_device create_dev = {
3721         .type = type,
3722         .fd = -1,
3723         .flags = KVM_CREATE_DEVICE_TEST,
3724     };
3725 
3726     if (ioctl(vmfd, KVM_CHECK_EXTENSION, KVM_CAP_DEVICE_CTRL) <= 0) {
3727         return false;
3728     }
3729 
3730     return (ioctl(vmfd, KVM_CREATE_DEVICE, &create_dev) >= 0);
3731 }
3732 
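/*
 * kvm_device_supported() above probes with KVM_CREATE_DEVICE_TEST, which
 * asks the kernel to validate the device type without actually
 * instantiating it; the same flag is what kvm_create_device() uses when
 * 'test' is true.
 */
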
3733 int kvm_set_one_reg(CPUState *cs, uint64_t id, void *source)
3734 {
3735     struct kvm_one_reg reg;
3736     int r;
3737 
3738     reg.id = id;
3739     reg.addr = (uintptr_t) source;
3740     r = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
3741     if (r) {
3742         trace_kvm_failed_reg_set(id, strerror(-r));
3743     }
3744     return r;
3745 }
3746 
3747 int kvm_get_one_reg(CPUState *cs, uint64_t id, void *target)
3748 {
3749     struct kvm_one_reg reg;
3750     int r;
3751 
3752     reg.id = id;
3753     reg.addr = (uintptr_t) target;
3754     r = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
3755     if (r) {
3756         trace_kvm_failed_reg_get(id, strerror(-r));
3757     }
3758     return r;
3759 }
3760 
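/*
 * Illustrative use of the ONE_REG accessors above (the register ID is
 * hypothetical; real IDs are architecture-specific KVM_REG_* values):
 *
 *     uint64_t val;
 *     if (kvm_get_one_reg(cs, id, &val) == 0) {
 *         ... val now holds the register contents ...
 *     }
 */
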
3761 static bool kvm_accel_has_memory(MachineState *ms, AddressSpace *as,
3762                                  hwaddr start_addr, hwaddr size)
3763 {
3764     KVMState *kvm = KVM_STATE(ms->accelerator);
3765     int i;
3766 
3767     for (i = 0; i < kvm->nr_as; ++i) {
3768         if (kvm->as[i].as == as && kvm->as[i].ml) {
3769             size = MIN(kvm_max_slot_size, size);
3770             return NULL != kvm_lookup_matching_slot(kvm->as[i].ml,
3771                                                     start_addr, size);
3772         }
3773     }
3774 
3775     return false;
3776 }
3777 
3778 static void kvm_get_kvm_shadow_mem(Object *obj, Visitor *v,
3779                                    const char *name, void *opaque,
3780                                    Error **errp)
3781 {
3782     KVMState *s = KVM_STATE(obj);
3783     int64_t value = s->kvm_shadow_mem;
3784 
3785     visit_type_int(v, name, &value, errp);
3786 }
3787 
3788 static void kvm_set_kvm_shadow_mem(Object *obj, Visitor *v,
3789                                    const char *name, void *opaque,
3790                                    Error **errp)
3791 {
3792     KVMState *s = KVM_STATE(obj);
3793     int64_t value;
3794 
3795     if (s->fd != -1) {
3796         error_setg(errp, "Cannot set properties after the accelerator has been initialized");
3797         return;
3798     }
3799 
3800     if (!visit_type_int(v, name, &value, errp)) {
3801         return;
3802     }
3803 
3804     s->kvm_shadow_mem = value;
3805 }
3806 
3807 static void kvm_set_kernel_irqchip(Object *obj, Visitor *v,
3808                                    const char *name, void *opaque,
3809                                    Error **errp)
3810 {
3811     KVMState *s = KVM_STATE(obj);
3812     OnOffSplit mode;
3813 
3814     if (s->fd != -1) {
3815         error_setg(errp, "Cannot set properties after the accelerator has been initialized");
3816         return;
3817     }
3818 
3819     if (!visit_type_OnOffSplit(v, name, &mode, errp)) {
3820         return;
3821     }
3822     switch (mode) {
3823     case ON_OFF_SPLIT_ON:
3824         s->kernel_irqchip_allowed = true;
3825         s->kernel_irqchip_required = true;
3826         s->kernel_irqchip_split = ON_OFF_AUTO_OFF;
3827         break;
3828     case ON_OFF_SPLIT_OFF:
3829         s->kernel_irqchip_allowed = false;
3830         s->kernel_irqchip_required = false;
3831         s->kernel_irqchip_split = ON_OFF_AUTO_OFF;
3832         break;
3833     case ON_OFF_SPLIT_SPLIT:
3834         s->kernel_irqchip_allowed = true;
3835         s->kernel_irqchip_required = true;
3836         s->kernel_irqchip_split = ON_OFF_AUTO_ON;
3837         break;
3838     default:
3839         /* The value was checked in visit_type_OnOffSplit() above. If
3840          * we get here, then something is wrong in QEMU.
3841          */
3842         abort();
3843     }
3844 }
3845 
3846 bool kvm_kernel_irqchip_allowed(void)
3847 {
3848     return kvm_state->kernel_irqchip_allowed;
3849 }
3850 
3851 bool kvm_kernel_irqchip_required(void)
3852 {
3853     return kvm_state->kernel_irqchip_required;
3854 }
3855 
3856 bool kvm_kernel_irqchip_split(void)
3857 {
3858     return kvm_state->kernel_irqchip_split == ON_OFF_AUTO_ON;
3859 }
3860 
3861 static void kvm_get_dirty_ring_size(Object *obj, Visitor *v,
3862                                     const char *name, void *opaque,
3863                                     Error **errp)
3864 {
3865     KVMState *s = KVM_STATE(obj);
3866     uint32_t value = s->kvm_dirty_ring_size;
3867 
3868     visit_type_uint32(v, name, &value, errp);
3869 }
3870 
3871 static void kvm_set_dirty_ring_size(Object *obj, Visitor *v,
3872                                     const char *name, void *opaque,
3873                                     Error **errp)
3874 {
3875     KVMState *s = KVM_STATE(obj);
3876     uint32_t value;
3877 
3878     if (s->fd != -1) {
3879         error_setg(errp, "Cannot set properties after the accelerator has been initialized");
3880         return;
3881     }
3882 
3883     if (!visit_type_uint32(v, name, &value, errp)) {
3884         return;
3885     }
3886     if (value & (value - 1)) {
3887         error_setg(errp, "dirty-ring-size must be a power of two.");
3888         return;
3889     }
3890 
3891     s->kvm_dirty_ring_size = value;
3892 }
3893 
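/*
 * The property parsed above is set on the accelerator from the command
 * line, e.g. "-accel kvm,dirty-ring-size=4096"; per the check above the
 * value must be a power of two (0 keeps the default bitmap mechanism).
 */
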
3894 static char *kvm_get_device(Object *obj,
3895                             Error **errp G_GNUC_UNUSED)
3896 {
3897     KVMState *s = KVM_STATE(obj);
3898 
3899     return g_strdup(s->device);
3900 }
3901 
3902 static void kvm_set_device(Object *obj,
3903                            const char *value,
3904                            Error **errp G_GNUC_UNUSED)
3905 {
3906     KVMState *s = KVM_STATE(obj);
3907 
3908     g_free(s->device);
3909     s->device = g_strdup(value);
3910 }
3911 
3912 static void kvm_set_kvm_rapl(Object *obj, bool value, Error **errp)
3913 {
3914     KVMState *s = KVM_STATE(obj);
3915     s->msr_energy.enable = value;
3916 }
3917 
3918 static void kvm_set_kvm_rapl_socket_path(Object *obj,
3919                                          const char *str,
3920                                          Error **errp)
3921 {
3922     KVMState *s = KVM_STATE(obj);
3923     g_free(s->msr_energy.socket_path);
3924     s->msr_energy.socket_path = g_strdup(str);
3925 }
3926 
3927 static void kvm_accel_instance_init(Object *obj)
3928 {
3929     KVMState *s = KVM_STATE(obj);
3930 
3931     s->fd = -1;
3932     s->vmfd = -1;
3933     s->kvm_shadow_mem = -1;
3934     s->kernel_irqchip_allowed = true;
3935     s->kernel_irqchip_split = ON_OFF_AUTO_AUTO;
3936     /* KVM dirty ring is by default off */
3937     s->kvm_dirty_ring_size = 0;
3938     s->kvm_dirty_ring_with_bitmap = false;
3939     s->kvm_eager_split_size = 0;
3940     s->notify_vmexit = NOTIFY_VMEXIT_OPTION_RUN;
3941     s->notify_window = 0;
3942     s->xen_version = 0;
3943     s->xen_gnttab_max_frames = 64;
3944     s->xen_evtchn_max_pirq = 256;
3945     s->device = NULL;
3946     s->msr_energy.enable = false;
3947 }
3948 
3949 /**
3950  * kvm_gdbstub_sstep_flags():
3951  *
3952  * Returns: SSTEP_* flags that KVM supports for guest debug. The
3953  * support is probed during kvm_init()
3954  */
3955 static int kvm_gdbstub_sstep_flags(void)
3956 {
3957     return kvm_sstep_flags;
3958 }
3959 
3960 static void kvm_accel_class_init(ObjectClass *oc, void *data)
3961 {
3962     AccelClass *ac = ACCEL_CLASS(oc);
3963     ac->name = "KVM";
3964     ac->init_machine = kvm_init;
3965     ac->has_memory = kvm_accel_has_memory;
3966     ac->allowed = &kvm_allowed;
3967     ac->gdbstub_supported_sstep_flags = kvm_gdbstub_sstep_flags;
3968 
3969     object_class_property_add(oc, "kernel-irqchip", "on|off|split",
3970         NULL, kvm_set_kernel_irqchip,
3971         NULL, NULL);
3972     object_class_property_set_description(oc, "kernel-irqchip",
3973         "Configure KVM in-kernel irqchip");
3974 
3975     object_class_property_add(oc, "kvm-shadow-mem", "int",
3976         kvm_get_kvm_shadow_mem, kvm_set_kvm_shadow_mem,
3977         NULL, NULL);
3978     object_class_property_set_description(oc, "kvm-shadow-mem",
3979         "KVM shadow MMU size");
3980 
3981     object_class_property_add(oc, "dirty-ring-size", "uint32",
3982         kvm_get_dirty_ring_size, kvm_set_dirty_ring_size,
3983         NULL, NULL);
3984     object_class_property_set_description(oc, "dirty-ring-size",
3985         "Size of KVM dirty page ring buffer (default: 0, i.e. use bitmap)");
3986 
3987     object_class_property_add_str(oc, "device", kvm_get_device, kvm_set_device);
3988     object_class_property_set_description(oc, "device",
3989         "Path to the device node to use (default: /dev/kvm)");
3990 
3991     object_class_property_add_bool(oc, "rapl",
3992                                    NULL,
3993                                    kvm_set_kvm_rapl);
3994     object_class_property_set_description(oc, "rapl",
3995         "Allow energy-related MSRs for the RAPL interface in the guest");
3996 
3997     object_class_property_add_str(oc, "rapl-helper-socket", NULL,
3998                                   kvm_set_kvm_rapl_socket_path);
3999     object_class_property_set_description(oc, "rapl-helper-socket",
4000         "Socket path for communicating with the Virtual MSR helper daemon");
4001 
4002     kvm_arch_accel_class_init(oc);
4003 }
4004 
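/*
 * All of the class properties registered above are exposed as -accel
 * suboptions; a combined example (illustrative):
 *
 *     qemu-system-x86_64 -accel kvm,kernel-irqchip=split,dirty-ring-size=4096
 */
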
4005 static const TypeInfo kvm_accel_type = {
4006     .name = TYPE_KVM_ACCEL,
4007     .parent = TYPE_ACCEL,
4008     .instance_init = kvm_accel_instance_init,
4009     .class_init = kvm_accel_class_init,
4010     .instance_size = sizeof(KVMState),
4011 };
4012 
4013 static void kvm_type_init(void)
4014 {
4015     type_register_static(&kvm_accel_type);
4016 }
4017 
4018 type_init(kvm_type_init);
4019 
4020 typedef struct StatsArgs {
4021     union StatsResultsType {
4022         StatsResultList **stats;
4023         StatsSchemaList **schema;
4024     } result;
4025     strList *names;
4026     Error **errp;
4027 } StatsArgs;
4028 
4029 static StatsList *add_kvmstat_entry(struct kvm_stats_desc *pdesc,
4030                                     uint64_t *stats_data,
4031                                     StatsList *stats_list,
4032                                     Error **errp)
4033 {
4034 
4035     Stats *stats;
4036     uint64List *val_list = NULL;
4037 
4038     /* Only add stats that we understand.  */
4039     switch (pdesc->flags & KVM_STATS_TYPE_MASK) {
4040     case KVM_STATS_TYPE_CUMULATIVE:
4041     case KVM_STATS_TYPE_INSTANT:
4042     case KVM_STATS_TYPE_PEAK:
4043     case KVM_STATS_TYPE_LINEAR_HIST:
4044     case KVM_STATS_TYPE_LOG_HIST:
4045         break;
4046     default:
4047         return stats_list;
4048     }
4049 
4050     switch (pdesc->flags & KVM_STATS_UNIT_MASK) {
4051     case KVM_STATS_UNIT_NONE:
4052     case KVM_STATS_UNIT_BYTES:
4053     case KVM_STATS_UNIT_CYCLES:
4054     case KVM_STATS_UNIT_SECONDS:
4055     case KVM_STATS_UNIT_BOOLEAN:
4056         break;
4057     default:
4058         return stats_list;
4059     }
4060 
4061     switch (pdesc->flags & KVM_STATS_BASE_MASK) {
4062     case KVM_STATS_BASE_POW10:
4063     case KVM_STATS_BASE_POW2:
4064         break;
4065     default:
4066         return stats_list;
4067     }
4068 
4069     /* Alloc and populate data list */
4070     stats = g_new0(Stats, 1);
4071     stats->name = g_strdup(pdesc->name);
4072     stats->value = g_new0(StatsValue, 1);
4073 
4074     if ((pdesc->flags & KVM_STATS_UNIT_MASK) == KVM_STATS_UNIT_BOOLEAN) {
4075         stats->value->u.boolean = *stats_data;
4076         stats->value->type = QTYPE_QBOOL;
4077     } else if (pdesc->size == 1) {
4078         stats->value->u.scalar = *stats_data;
4079         stats->value->type = QTYPE_QNUM;
4080     } else {
4081         int i;
4082         for (i = 0; i < pdesc->size; i++) {
4083             QAPI_LIST_PREPEND(val_list, stats_data[i]);
4084         }
4085         stats->value->u.list = val_list;
4086         stats->value->type = QTYPE_QLIST;
4087     }
4088 
4089     QAPI_LIST_PREPEND(stats_list, stats);
4090     return stats_list;
4091 }
4092 
4093 static StatsSchemaValueList *add_kvmschema_entry(struct kvm_stats_desc *pdesc,
4094                                                  StatsSchemaValueList *list,
4095                                                  Error **errp)
4096 {
4097     StatsSchemaValueList *schema_entry = g_new0(StatsSchemaValueList, 1);
4098     schema_entry->value = g_new0(StatsSchemaValue, 1);
4099 
4100     switch (pdesc->flags & KVM_STATS_TYPE_MASK) {
4101     case KVM_STATS_TYPE_CUMULATIVE:
4102         schema_entry->value->type = STATS_TYPE_CUMULATIVE;
4103         break;
4104     case KVM_STATS_TYPE_INSTANT:
4105         schema_entry->value->type = STATS_TYPE_INSTANT;
4106         break;
4107     case KVM_STATS_TYPE_PEAK:
4108         schema_entry->value->type = STATS_TYPE_PEAK;
4109         break;
4110     case KVM_STATS_TYPE_LINEAR_HIST:
4111         schema_entry->value->type = STATS_TYPE_LINEAR_HISTOGRAM;
4112         schema_entry->value->bucket_size = pdesc->bucket_size;
4113         schema_entry->value->has_bucket_size = true;
4114         break;
4115     case KVM_STATS_TYPE_LOG_HIST:
4116         schema_entry->value->type = STATS_TYPE_LOG2_HISTOGRAM;
4117         break;
4118     default:
4119         goto exit;
4120     }
4121 
4122     switch (pdesc->flags & KVM_STATS_UNIT_MASK) {
4123     case KVM_STATS_UNIT_NONE:
4124         break;
4125     case KVM_STATS_UNIT_BOOLEAN:
4126         schema_entry->value->has_unit = true;
4127         schema_entry->value->unit = STATS_UNIT_BOOLEAN;
4128         break;
4129     case KVM_STATS_UNIT_BYTES:
4130         schema_entry->value->has_unit = true;
4131         schema_entry->value->unit = STATS_UNIT_BYTES;
4132         break;
4133     case KVM_STATS_UNIT_CYCLES:
4134         schema_entry->value->has_unit = true;
4135         schema_entry->value->unit = STATS_UNIT_CYCLES;
4136         break;
4137     case KVM_STATS_UNIT_SECONDS:
4138         schema_entry->value->has_unit = true;
4139         schema_entry->value->unit = STATS_UNIT_SECONDS;
4140         break;
4141     default:
4142         goto exit;
4143     }
4144 
4145     schema_entry->value->exponent = pdesc->exponent;
4146     if (pdesc->exponent) {
4147         switch (pdesc->flags & KVM_STATS_BASE_MASK) {
4148         case KVM_STATS_BASE_POW10:
4149             schema_entry->value->has_base = true;
4150             schema_entry->value->base = 10;
4151             break;
4152         case KVM_STATS_BASE_POW2:
4153             schema_entry->value->has_base = true;
4154             schema_entry->value->base = 2;
4155             break;
4156         default:
4157             goto exit;
4158         }
4159     }
4160 
4161     schema_entry->value->name = g_strdup(pdesc->name);
4162     schema_entry->next = list;
4163     return schema_entry;
4164 exit:
4165     g_free(schema_entry->value);
4166     g_free(schema_entry);
4167     return list;
4168 }
4169 
4170 /* Cached stats descriptors */
4171 typedef struct StatsDescriptors {
4172     const char *ident; /* cache key, currently the StatsTarget */
4173     struct kvm_stats_desc *kvm_stats_desc;
4174     struct kvm_stats_header kvm_stats_header;
4175     QTAILQ_ENTRY(StatsDescriptors) next;
4176 } StatsDescriptors;
4177 
4178 static QTAILQ_HEAD(, StatsDescriptors) stats_descriptors =
4179     QTAILQ_HEAD_INITIALIZER(stats_descriptors);
4180 
4181 /*
4182  * Return the descriptors for 'target'; they have either already been read
4183  * or are retrieved here from 'stats_fd'.
4184  */
4185 static StatsDescriptors *find_stats_descriptors(StatsTarget target, int stats_fd,
4186                                                 Error **errp)
4187 {
4188     StatsDescriptors *descriptors;
4189     const char *ident;
4190     struct kvm_stats_desc *kvm_stats_desc;
4191     struct kvm_stats_header *kvm_stats_header;
4192     size_t size_desc;
4193     ssize_t ret;
4194 
4195     ident = StatsTarget_str(target);
4196     QTAILQ_FOREACH(descriptors, &stats_descriptors, next) {
4197         if (g_str_equal(descriptors->ident, ident)) {
4198             return descriptors;
4199         }
4200     }
4201 
4202     descriptors = g_new0(StatsDescriptors, 1);
4203 
4204     /* Read stats header */
4205     kvm_stats_header = &descriptors->kvm_stats_header;
4206     ret = pread(stats_fd, kvm_stats_header, sizeof(*kvm_stats_header), 0);
4207     if (ret != sizeof(*kvm_stats_header)) {
4208         error_setg(errp, "KVM stats: failed to read stats header: "
4209                    "expected %zu actual %zu",
4210                    sizeof(*kvm_stats_header), ret);
4211         g_free(descriptors);
4212         return NULL;
4213     }
4214     size_desc = sizeof(*kvm_stats_desc) + kvm_stats_header->name_size;
4215 
4216     /* Read stats descriptors */
4217     kvm_stats_desc = g_malloc0_n(kvm_stats_header->num_desc, size_desc);
4218     ret = pread(stats_fd, kvm_stats_desc,
4219                 size_desc * kvm_stats_header->num_desc,
4220                 kvm_stats_header->desc_offset);
4221 
4222     if (ret != size_desc * kvm_stats_header->num_desc) {
4223         error_setg(errp, "KVM stats: failed to read stats descriptors: "
4224                    "expected %zu actual %zu",
4225                    size_desc * kvm_stats_header->num_desc, ret);
4226         g_free(descriptors);
4227         g_free(kvm_stats_desc);
4228         return NULL;
4229     }
4230     descriptors->kvm_stats_desc = kvm_stats_desc;
4231     descriptors->ident = ident;
4232     QTAILQ_INSERT_TAIL(&stats_descriptors, descriptors, next);
4233     return descriptors;
4234 }
4235 
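/*
 * Layout of a KVM binary stats fd, as consumed above and below: a
 * kvm_stats_header at offset 0, followed by num_desc descriptors of
 * sizeof(struct kvm_stats_desc) + name_size bytes each at desc_offset,
 * with the actual counter values at data_offset.  The descriptors are
 * cached per StatsTarget since they do not change for a running VM.
 */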
4236 static void query_stats(StatsResultList **result, StatsTarget target,
4237                         strList *names, int stats_fd, CPUState *cpu,
4238                         Error **errp)
4239 {
4240     struct kvm_stats_desc *kvm_stats_desc;
4241     struct kvm_stats_header *kvm_stats_header;
4242     StatsDescriptors *descriptors;
4243     g_autofree uint64_t *stats_data = NULL;
4244     struct kvm_stats_desc *pdesc;
4245     StatsList *stats_list = NULL;
4246     size_t size_desc, size_data = 0;
4247     ssize_t ret;
4248     int i;
4249 
4250     descriptors = find_stats_descriptors(target, stats_fd, errp);
4251     if (!descriptors) {
4252         return;
4253     }
4254 
4255     kvm_stats_header = &descriptors->kvm_stats_header;
4256     kvm_stats_desc = descriptors->kvm_stats_desc;
4257     size_desc = sizeof(*kvm_stats_desc) + kvm_stats_header->name_size;
4258 
4259     /* Tally the total data size */
4260     for (i = 0; i < kvm_stats_header->num_desc; ++i) {
4261         pdesc = (void *)kvm_stats_desc + i * size_desc;
4262         size_data += pdesc->size * sizeof(*stats_data);
4263     }
4264 
4265     stats_data = g_malloc0(size_data);
4266     ret = pread(stats_fd, stats_data, size_data, kvm_stats_header->data_offset);
4267 
4268     if (ret != size_data) {
4269         error_setg(errp, "KVM stats: failed to read data: "
4270                    "expected %zu actual %zu", size_data, ret);
4271         return;
4272     }
4273 
4274     for (i = 0; i < kvm_stats_header->num_desc; ++i) {
4275         uint64_t *stats;
4276         pdesc = (void *)kvm_stats_desc + i * size_desc;
4277 
4278         /* Add entry to the list */
4279         stats = (void *)stats_data + pdesc->offset;
4280         if (!apply_str_list_filter(pdesc->name, names)) {
4281             continue;
4282         }
4283         stats_list = add_kvmstat_entry(pdesc, stats, stats_list, errp);
4284     }
4285 
4286     if (!stats_list) {
4287         return;
4288     }
4289 
4290     switch (target) {
4291     case STATS_TARGET_VM:
4292         add_stats_entry(result, STATS_PROVIDER_KVM, NULL, stats_list);
4293         break;
4294     case STATS_TARGET_VCPU:
4295         add_stats_entry(result, STATS_PROVIDER_KVM,
4296                         cpu->parent_obj.canonical_path,
4297                         stats_list);
4298         break;
4299     default:
4300         g_assert_not_reached();
4301     }
4302 }
4303 
4304 static void query_stats_schema(StatsSchemaList **result, StatsTarget target,
4305                                int stats_fd, Error **errp)
4306 {
4307     struct kvm_stats_desc *kvm_stats_desc;
4308     struct kvm_stats_header *kvm_stats_header;
4309     StatsDescriptors *descriptors;
4310     struct kvm_stats_desc *pdesc;
4311     StatsSchemaValueList *stats_list = NULL;
4312     size_t size_desc;
4313     int i;
4314 
4315     descriptors = find_stats_descriptors(target, stats_fd, errp);
4316     if (!descriptors) {
4317         return;
4318     }
4319 
4320     kvm_stats_header = &descriptors->kvm_stats_header;
4321     kvm_stats_desc = descriptors->kvm_stats_desc;
4322     size_desc = sizeof(*kvm_stats_desc) + kvm_stats_header->name_size;
4323 
4324     /* Walk the descriptors and build the schema entries */
4325     for (i = 0; i < kvm_stats_header->num_desc; ++i) {
4326         pdesc = (void *)kvm_stats_desc + i * size_desc;
4327         stats_list = add_kvmschema_entry(pdesc, stats_list, errp);
4328     }
4329 
4330     add_stats_schema(result, STATS_PROVIDER_KVM, target, stats_list);
4331 }
4332 
4333 static void query_stats_vcpu(CPUState *cpu, StatsArgs *kvm_stats_args)
4334 {
4335     int stats_fd = cpu->kvm_vcpu_stats_fd;
4336     Error *local_err = NULL;
4337 
4338     if (stats_fd == -1) {
4339         error_setg_errno(&local_err, errno, "KVM stats: ioctl failed");
4340         error_propagate(kvm_stats_args->errp, local_err);
4341         return;
4342     }
4343     query_stats(kvm_stats_args->result.stats, STATS_TARGET_VCPU,
4344                 kvm_stats_args->names, stats_fd, cpu,
4345                 kvm_stats_args->errp);
4346 }
4347 
4348 static void query_stats_schema_vcpu(CPUState *cpu, StatsArgs *kvm_stats_args)
4349 {
4350     int stats_fd = cpu->kvm_vcpu_stats_fd;
4351     Error *local_err = NULL;
4352 
4353     if (stats_fd == -1) {
4354         error_setg_errno(&local_err, errno, "KVM stats: ioctl failed");
4355         error_propagate(kvm_stats_args->errp, local_err);
4356         return;
4357     }
4358     query_stats_schema(kvm_stats_args->result.schema, STATS_TARGET_VCPU, stats_fd,
4359                        kvm_stats_args->errp);
4360 }
4361 
4362 static void query_stats_cb(StatsResultList **result, StatsTarget target,
4363                            strList *names, strList *targets, Error **errp)
4364 {
4365     KVMState *s = kvm_state;
4366     CPUState *cpu;
4367     int stats_fd;
4368 
4369     switch (target) {
4370     case STATS_TARGET_VM:
4371     {
4372         stats_fd = kvm_vm_ioctl(s, KVM_GET_STATS_FD, NULL);
4373         if (stats_fd == -1) {
4374             error_setg_errno(errp, errno, "KVM stats: ioctl failed");
4375             return;
4376         }
4377         query_stats(result, target, names, stats_fd, NULL, errp);
4378         close(stats_fd);
4379         break;
4380     }
4381     case STATS_TARGET_VCPU:
4382     {
4383         StatsArgs stats_args;
4384         stats_args.result.stats = result;
4385         stats_args.names = names;
4386         stats_args.errp = errp;
4387         CPU_FOREACH(cpu) {
4388             if (!apply_str_list_filter(cpu->parent_obj.canonical_path, targets)) {
4389                 continue;
4390             }
4391             query_stats_vcpu(cpu, &stats_args);
4392         }
4393         break;
4394     }
4395     default:
4396         break;
4397     }
4398 }
4399 
4400 void query_stats_schemas_cb(StatsSchemaList **result, Error **errp)
4401 {
4402     StatsArgs stats_args;
4403     KVMState *s = kvm_state;
4404     int stats_fd;
4405 
4406     stats_fd = kvm_vm_ioctl(s, KVM_GET_STATS_FD, NULL);
4407     if (stats_fd == -1) {
4408         error_setg_errno(errp, errno, "KVM stats: ioctl failed");
4409         return;
4410     }
4411     query_stats_schema(result, STATS_TARGET_VM, stats_fd, errp);
4412     close(stats_fd);
4413 
4414     if (first_cpu) {
4415         stats_args.result.schema = result;
4416         stats_args.errp = errp;
4417         query_stats_schema_vcpu(first_cpu, &stats_args);
4418     }
4419 }
4420 
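/*
 * query_stats_cb() and query_stats_schemas_cb() above back the generic
 * "query-stats" / "query-stats-schemas" QMP commands for the KVM
 * provider, fetching per-VM stats from the VM fd and per-vCPU stats
 * from each CPU's cached kvm_vcpu_stats_fd.
 */
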
4421 void kvm_mark_guest_state_protected(void)
4422 {
4423     kvm_state->guest_state_protected = true;
4424 }
4425 
4426 int kvm_create_guest_memfd(uint64_t size, uint64_t flags, Error **errp)
4427 {
4428     int fd;
4429     struct kvm_create_guest_memfd guest_memfd = {
4430         .size = size,
4431         .flags = flags,
4432     };
4433 
4434     if (!kvm_guest_memfd_supported) {
4435         error_setg(errp, "KVM does not support guest_memfd");
4436         return -1;
4437     }
4438 
4439     fd = kvm_vm_ioctl(kvm_state, KVM_CREATE_GUEST_MEMFD, &guest_memfd);
4440     if (fd < 0) {
4441         error_setg_errno(errp, errno, "Error creating KVM guest_memfd");
4442         return -1;
4443     }
4444 
4445     return fd;
4446 }
4447