/*
 * QEMU KVM support
 *
 * Copyright IBM, Corp. 2008
 *           Red Hat, Inc. 2008
 *
 * Authors:
 *  Anthony Liguori <aliguori@us.ibm.com>
 *  Glauber Costa   <gcosta@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include <sys/ioctl.h>
#include <poll.h>

#include <linux/kvm.h>

#include "qemu/atomic.h"
#include "qemu/option.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "qapi/error.h"
#include "hw/pci/msi.h"
#include "hw/pci/msix.h"
#include "hw/s390x/adapter.h"
#include "gdbstub/enums.h"
#include "system/kvm_int.h"
#include "system/runstate.h"
#include "system/cpus.h"
#include "system/accel-blocker.h"
#include "qemu/bswap.h"
#include "exec/memory.h"
#include "exec/ram_addr.h"
#include "qemu/event_notifier.h"
#include "qemu/main-loop.h"
#include "trace.h"
#include "hw/irq.h"
#include "qapi/visitor.h"
#include "qapi/qapi-types-common.h"
#include "qapi/qapi-visit-common.h"
#include "system/reset.h"
#include "qemu/guest-random.h"
#include "system/hw_accel.h"
#include "kvm-cpus.h"
#include "system/dirtylimit.h"
#include "qemu/range.h"

#include "hw/boards.h"
#include "system/stats.h"

/* This check must be after config-host.h is included */
#ifdef CONFIG_EVENTFD
#include <sys/eventfd.h>
#endif

/* KVM uses PAGE_SIZE in its definition of KVM_COALESCED_MMIO_MAX. We
 * need to use the real host PAGE_SIZE, as that's what KVM will use.
 */
#ifdef PAGE_SIZE
#undef PAGE_SIZE
#endif
#define PAGE_SIZE qemu_real_host_page_size()

#ifndef KVM_GUESTDBG_BLOCKIRQ
#define KVM_GUESTDBG_BLOCKIRQ 0
#endif

/* Default num of memslots to be allocated when VM starts */
#define KVM_MEMSLOTS_NR_ALLOC_DEFAULT 16
/* Default max allowed memslots if kernel reported nothing */
#define KVM_MEMSLOTS_NR_MAX_DEFAULT 32

struct KVMParkedVcpu {
    unsigned long vcpu_id;
    int kvm_fd;
    QLIST_ENTRY(KVMParkedVcpu) node;
};

KVMState *kvm_state;
bool kvm_kernel_irqchip;
bool kvm_split_irqchip;
bool kvm_async_interrupts_allowed;
bool kvm_halt_in_kernel_allowed;
bool kvm_resamplefds_allowed;
bool kvm_msi_via_irqfd_allowed;
bool kvm_gsi_routing_allowed;
bool kvm_gsi_direct_mapping;
bool kvm_allowed;
bool kvm_readonly_mem_allowed;
bool kvm_vm_attributes_allowed;
bool kvm_msi_use_devid;
static bool kvm_has_guest_debug;
static int kvm_sstep_flags;
static bool kvm_immediate_exit;
static uint64_t kvm_supported_memory_attributes;
static bool kvm_guest_memfd_supported;
static hwaddr kvm_max_slot_size = ~0;

static const KVMCapabilityInfo kvm_required_capabilites[] = {
    KVM_CAP_INFO(USER_MEMORY),
    KVM_CAP_INFO(DESTROY_MEMORY_REGION_WORKS),
    KVM_CAP_INFO(JOIN_MEMORY_REGIONS_WORKS),
    KVM_CAP_INFO(INTERNAL_ERROR_DATA),
    KVM_CAP_INFO(IOEVENTFD),
    KVM_CAP_INFO(IOEVENTFD_ANY_LENGTH),
    KVM_CAP_LAST_INFO
};

static NotifierList kvm_irqchip_change_notifiers =
    NOTIFIER_LIST_INITIALIZER(kvm_irqchip_change_notifiers);

struct KVMResampleFd {
    int gsi;
    EventNotifier *resample_event;
    QLIST_ENTRY(KVMResampleFd) node;
};
typedef struct KVMResampleFd KVMResampleFd;

/*
 * Only used with split irqchip where we need to do the resample fd
 * kick for the kernel from userspace.
 */
static QLIST_HEAD(, KVMResampleFd) kvm_resample_fd_list =
    QLIST_HEAD_INITIALIZER(kvm_resample_fd_list);

static QemuMutex kml_slots_lock;

#define kvm_slots_lock()    qemu_mutex_lock(&kml_slots_lock)
#define kvm_slots_unlock()  qemu_mutex_unlock(&kml_slots_lock)
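
/*
 * Note: this is one global mutex, so kvm_slots_lock()/kvm_slots_unlock()
 * serialize access to the slots[] arrays of every KVMMemoryListener
 * (one per address space), not just a single listener.
 */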

static void kvm_slot_init_dirty_bitmap(KVMSlot *mem);

static inline void kvm_resample_fd_remove(int gsi)
{
    KVMResampleFd *rfd;

    QLIST_FOREACH(rfd, &kvm_resample_fd_list, node) {
        if (rfd->gsi == gsi) {
            QLIST_REMOVE(rfd, node);
            g_free(rfd);
            break;
        }
    }
}

static inline void kvm_resample_fd_insert(int gsi, EventNotifier *event)
{
    KVMResampleFd *rfd = g_new0(KVMResampleFd, 1);

    rfd->gsi = gsi;
    rfd->resample_event = event;

    QLIST_INSERT_HEAD(&kvm_resample_fd_list, rfd, node);
}

void kvm_resample_fd_notify(int gsi)
{
    KVMResampleFd *rfd;

    QLIST_FOREACH(rfd, &kvm_resample_fd_list, node) {
        if (rfd->gsi == gsi) {
            event_notifier_set(rfd->resample_event);
            trace_kvm_resample_fd_notify(gsi);
            return;
        }
    }
}

/**
 * kvm_slots_grow(): Grow the slots[] array in the KVMMemoryListener
 *
 * @kml: The KVMMemoryListener* to grow the slots[] array
 * @nr_slots_new: The new size of slots[] array
 *
 * Returns: True if the array grows larger, false otherwise.
 */
static bool kvm_slots_grow(KVMMemoryListener *kml, unsigned int nr_slots_new)
{
    unsigned int i, cur = kml->nr_slots_allocated;
    KVMSlot *slots;

    if (nr_slots_new > kvm_state->nr_slots_max) {
        nr_slots_new = kvm_state->nr_slots_max;
    }

    if (cur >= nr_slots_new) {
        /* Big enough, no need to grow, or we reached max */
        return false;
    }

    if (cur == 0) {
        slots = g_new0(KVMSlot, nr_slots_new);
    } else {
        assert(kml->slots);
        slots = g_renew(KVMSlot, kml->slots, nr_slots_new);
        /*
         * g_renew() doesn't initialize extended buffers, however kvm
         * memslots require fields to be zero-initialized. E.g. pointers,
         * memory_size field, etc.
         */
        memset(&slots[cur], 0x0, sizeof(slots[0]) * (nr_slots_new - cur));
    }

    for (i = cur; i < nr_slots_new; i++) {
        slots[i].slot = i;
    }

    kml->slots = slots;
    kml->nr_slots_allocated = nr_slots_new;
    trace_kvm_slots_grow(cur, nr_slots_new);

    return true;
}
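
/*
 * Growth example: a listener starts with KVM_MEMSLOTS_NR_ALLOC_DEFAULT (16)
 * entries and doubles on demand (16 -> 32 -> 64 -> ...), clamped to the
 * kernel-reported kvm_state->nr_slots_max.
 */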

static bool kvm_slots_double(KVMMemoryListener *kml)
{
    return kvm_slots_grow(kml, kml->nr_slots_allocated * 2);
}

unsigned int kvm_get_max_memslots(void)
{
    KVMState *s = KVM_STATE(current_accel());

    return s->nr_slots_max;
}

unsigned int kvm_get_free_memslots(void)
{
    unsigned int used_slots = 0;
    KVMState *s = kvm_state;
    int i;

    kvm_slots_lock();
    for (i = 0; i < s->nr_as; i++) {
        if (!s->as[i].ml) {
            continue;
        }
        used_slots = MAX(used_slots, s->as[i].ml->nr_slots_used);
    }
    kvm_slots_unlock();

    return s->nr_slots_max - used_slots;
}
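
/*
 * The estimate above is conservative: each address space keeps its own
 * listener and slot array, so the busiest listener (largest
 * nr_slots_used) determines how many slots are still guaranteed to be
 * available in every address space.
 */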

/* Called with KVMMemoryListener.slots_lock held */
static KVMSlot *kvm_get_free_slot(KVMMemoryListener *kml)
{
    unsigned int n;
    int i;

    for (i = 0; i < kml->nr_slots_allocated; i++) {
        if (kml->slots[i].memory_size == 0) {
            return &kml->slots[i];
        }
    }

    /*
     * If no free slots, try to grow first by doubling. Cache the old size
     * here to avoid another round of search: if the grow succeeded, it
     * means slots[] now must have the existing "n" slots occupied,
     * followed by one or more free slots starting from slots[n].
     */
    n = kml->nr_slots_allocated;
    if (kvm_slots_double(kml)) {
        return &kml->slots[n];
    }

    return NULL;
}

/* Called with KVMMemoryListener.slots_lock held */
static KVMSlot *kvm_alloc_slot(KVMMemoryListener *kml)
{
    KVMSlot *slot = kvm_get_free_slot(kml);

    if (slot) {
        return slot;
    }

    fprintf(stderr, "%s: no free slot available\n", __func__);
    abort();
}

static KVMSlot *kvm_lookup_matching_slot(KVMMemoryListener *kml,
                                         hwaddr start_addr,
                                         hwaddr size)
{
    int i;

    for (i = 0; i < kml->nr_slots_allocated; i++) {
        KVMSlot *mem = &kml->slots[i];

        if (start_addr == mem->start_addr && size == mem->memory_size) {
            return mem;
        }
    }

    return NULL;
}

/*
 * Calculate and align the start address and the size of the section.
 * Return the size. If the size is 0, the aligned section is empty.
 */
static hwaddr kvm_align_section(MemoryRegionSection *section,
                                hwaddr *start)
{
    hwaddr size = int128_get64(section->size);
    hwaddr delta, aligned;

    /* kvm works in page size chunks, but the function may be called
       with sub-page size and unaligned start address. Pad the start
       address to next and truncate size to previous page boundary. */
    aligned = ROUND_UP(section->offset_within_address_space,
                       qemu_real_host_page_size());
    delta = aligned - section->offset_within_address_space;
    *start = aligned;
    if (delta > size) {
        return 0;
    }

    return (size - delta) & qemu_real_host_page_mask();
}
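
/*
 * Worked example, assuming 4 KiB host pages: a section at 0x1234 of size
 * 0x3000 is padded up to *start = 0x2000 (delta = 0xdcc) and the remaining
 * 0x2234 bytes are truncated down to 0x2000, i.e. exactly two whole pages.
 */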

int kvm_physical_memory_addr_from_host(KVMState *s, void *ram,
                                       hwaddr *phys_addr)
{
    KVMMemoryListener *kml = &s->memory_listener;
    int i, ret = 0;

    kvm_slots_lock();
    for (i = 0; i < kml->nr_slots_allocated; i++) {
        KVMSlot *mem = &kml->slots[i];

        if (ram >= mem->ram && ram < mem->ram + mem->memory_size) {
            *phys_addr = mem->start_addr + (ram - mem->ram);
            ret = 1;
            break;
        }
    }
    kvm_slots_unlock();

    return ret;
}

static int kvm_set_user_memory_region(KVMMemoryListener *kml, KVMSlot *slot, bool new)
{
    KVMState *s = kvm_state;
    struct kvm_userspace_memory_region2 mem;
    int ret;

    mem.slot = slot->slot | (kml->as_id << 16);
    mem.guest_phys_addr = slot->start_addr;
    mem.userspace_addr = (unsigned long)slot->ram;
    mem.flags = slot->flags;
    mem.guest_memfd = slot->guest_memfd;
    mem.guest_memfd_offset = slot->guest_memfd_offset;

    if (slot->memory_size && !new && (mem.flags ^ slot->old_flags) & KVM_MEM_READONLY) {
        /* Set the slot size to 0 before setting the slot to the desired
         * value. This is needed based on KVM commit 75d61fbc. */
        mem.memory_size = 0;

        if (kvm_guest_memfd_supported) {
            ret = kvm_vm_ioctl(s, KVM_SET_USER_MEMORY_REGION2, &mem);
        } else {
            ret = kvm_vm_ioctl(s, KVM_SET_USER_MEMORY_REGION, &mem);
        }
        if (ret < 0) {
            goto err;
        }
    }
    mem.memory_size = slot->memory_size;
    if (kvm_guest_memfd_supported) {
        ret = kvm_vm_ioctl(s, KVM_SET_USER_MEMORY_REGION2, &mem);
    } else {
        ret = kvm_vm_ioctl(s, KVM_SET_USER_MEMORY_REGION, &mem);
    }
    slot->old_flags = mem.flags;
err:
    trace_kvm_set_user_memory(mem.slot >> 16, (uint16_t)mem.slot, mem.flags,
                              mem.guest_phys_addr, mem.memory_size,
                              mem.userspace_addr, mem.guest_memfd,
                              mem.guest_memfd_offset, ret);
    if (ret < 0) {
        if (kvm_guest_memfd_supported) {
            error_report("%s: KVM_SET_USER_MEMORY_REGION2 failed, slot=%d,"
                         " start=0x%" PRIx64 ", size=0x%" PRIx64 ","
                         " flags=0x%" PRIx32 ", guest_memfd=%" PRId32 ","
                         " guest_memfd_offset=0x%" PRIx64 ": %s",
                         __func__, mem.slot, slot->start_addr,
                         (uint64_t)mem.memory_size, mem.flags,
                         mem.guest_memfd, (uint64_t)mem.guest_memfd_offset,
                         strerror(errno));
        } else {
            error_report("%s: KVM_SET_USER_MEMORY_REGION failed, slot=%d,"
                         " start=0x%" PRIx64 ", size=0x%" PRIx64 ": %s",
                         __func__, mem.slot, slot->start_addr,
                         (uint64_t)mem.memory_size, strerror(errno));
        }
    }
    return ret;
}
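
/*
 * The kernel-visible slot id above packs the address-space id into the
 * high 16 bits and the per-listener slot index into the low 16 bits,
 * e.g. slot 3 of address space 1 becomes (1 << 16) | 3 = 0x10003; the
 * trace call unpacks it the same way.
 */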

void kvm_park_vcpu(CPUState *cpu)
{
    struct KVMParkedVcpu *vcpu;

    trace_kvm_park_vcpu(cpu->cpu_index, kvm_arch_vcpu_id(cpu));

    vcpu = g_malloc0(sizeof(*vcpu));
    vcpu->vcpu_id = kvm_arch_vcpu_id(cpu);
    vcpu->kvm_fd = cpu->kvm_fd;
    QLIST_INSERT_HEAD(&kvm_state->kvm_parked_vcpus, vcpu, node);
}

int kvm_unpark_vcpu(KVMState *s, unsigned long vcpu_id)
{
    struct KVMParkedVcpu *cpu;
    int kvm_fd = -ENOENT;

    QLIST_FOREACH(cpu, &s->kvm_parked_vcpus, node) {
        if (cpu->vcpu_id == vcpu_id) {
            QLIST_REMOVE(cpu, node);
            kvm_fd = cpu->kvm_fd;
            g_free(cpu);
            break;
        }
    }

    trace_kvm_unpark_vcpu(vcpu_id, kvm_fd > 0 ? "unparked" : "!found parked");

    return kvm_fd;
}

static void kvm_reset_parked_vcpus(KVMState *s)
{
    struct KVMParkedVcpu *cpu;

    QLIST_FOREACH(cpu, &s->kvm_parked_vcpus, node) {
        kvm_arch_reset_parked_vcpu(cpu->vcpu_id, cpu->kvm_fd);
    }
}

int kvm_create_vcpu(CPUState *cpu)
{
    unsigned long vcpu_id = kvm_arch_vcpu_id(cpu);
    KVMState *s = kvm_state;
    int kvm_fd;

    /* check if the KVM vCPU already exists but is parked */
    kvm_fd = kvm_unpark_vcpu(s, vcpu_id);
    if (kvm_fd < 0) {
        /* vCPU not parked: create a new KVM vCPU */
        kvm_fd = kvm_vm_ioctl(s, KVM_CREATE_VCPU, vcpu_id);
        if (kvm_fd < 0) {
            error_report("KVM_CREATE_VCPU IOCTL failed for vCPU %lu", vcpu_id);
            return kvm_fd;
        }
    }

    cpu->kvm_fd = kvm_fd;
    cpu->kvm_state = s;
    cpu->vcpu_dirty = true;
    cpu->dirty_pages = 0;
    cpu->throttle_us_per_full = 0;

    trace_kvm_create_vcpu(cpu->cpu_index, vcpu_id, kvm_fd);

    return 0;
}

int kvm_create_and_park_vcpu(CPUState *cpu)
{
    int ret = 0;

    ret = kvm_create_vcpu(cpu);
    if (!ret) {
        kvm_park_vcpu(cpu);
    }

    return ret;
}

static int do_kvm_destroy_vcpu(CPUState *cpu)
{
    KVMState *s = kvm_state;
    int mmap_size;
    int ret = 0;

    trace_kvm_destroy_vcpu(cpu->cpu_index, kvm_arch_vcpu_id(cpu));

    ret = kvm_arch_destroy_vcpu(cpu);
    if (ret < 0) {
        goto err;
    }

    mmap_size = kvm_ioctl(s, KVM_GET_VCPU_MMAP_SIZE, 0);
    if (mmap_size < 0) {
        ret = mmap_size;
        trace_kvm_failed_get_vcpu_mmap_size();
        goto err;
    }

    ret = munmap(cpu->kvm_run, mmap_size);
    if (ret < 0) {
        goto err;
    }

    if (cpu->kvm_dirty_gfns) {
        ret = munmap(cpu->kvm_dirty_gfns, s->kvm_dirty_ring_bytes);
        if (ret < 0) {
            goto err;
        }
    }

    kvm_park_vcpu(cpu);
err:
    return ret;
}

void kvm_destroy_vcpu(CPUState *cpu)
{
    if (do_kvm_destroy_vcpu(cpu) < 0) {
        error_report("kvm_destroy_vcpu failed");
        exit(EXIT_FAILURE);
    }
}

int kvm_init_vcpu(CPUState *cpu, Error **errp)
{
    KVMState *s = kvm_state;
    int mmap_size;
    int ret;

    trace_kvm_init_vcpu(cpu->cpu_index, kvm_arch_vcpu_id(cpu));

    ret = kvm_create_vcpu(cpu);
    if (ret < 0) {
        error_setg_errno(errp, -ret,
                         "kvm_init_vcpu: kvm_create_vcpu failed (%lu)",
                         kvm_arch_vcpu_id(cpu));
        goto err;
    }

    mmap_size = kvm_ioctl(s, KVM_GET_VCPU_MMAP_SIZE, 0);
    if (mmap_size < 0) {
        ret = mmap_size;
        error_setg_errno(errp, -mmap_size,
                         "kvm_init_vcpu: KVM_GET_VCPU_MMAP_SIZE failed");
        goto err;
    }

    cpu->kvm_run = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE, MAP_SHARED,
                        cpu->kvm_fd, 0);
    if (cpu->kvm_run == MAP_FAILED) {
        ret = -errno;
        error_setg_errno(errp, ret,
                         "kvm_init_vcpu: mmap'ing vcpu state failed (%lu)",
                         kvm_arch_vcpu_id(cpu));
        goto err;
    }

    if (s->coalesced_mmio && !s->coalesced_mmio_ring) {
        s->coalesced_mmio_ring =
            (void *)cpu->kvm_run + s->coalesced_mmio * PAGE_SIZE;
    }

    if (s->kvm_dirty_ring_size) {
        /* Use MAP_SHARED to share pages with the kernel */
        cpu->kvm_dirty_gfns = mmap(NULL, s->kvm_dirty_ring_bytes,
                                   PROT_READ | PROT_WRITE, MAP_SHARED,
                                   cpu->kvm_fd,
                                   PAGE_SIZE * KVM_DIRTY_LOG_PAGE_OFFSET);
        if (cpu->kvm_dirty_gfns == MAP_FAILED) {
            ret = -errno;
            goto err;
        }
    }

    ret = kvm_arch_init_vcpu(cpu);
    if (ret < 0) {
        error_setg_errno(errp, -ret,
                         "kvm_init_vcpu: kvm_arch_init_vcpu failed (%lu)",
                         kvm_arch_vcpu_id(cpu));
    }
    cpu->kvm_vcpu_stats_fd = kvm_vcpu_ioctl(cpu, KVM_GET_STATS_FD, NULL);

err:
    return ret;
}
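
/*
 * Mapping layout recap: struct kvm_run sits at offset 0 of the vcpu mmap
 * area; when coalesced MMIO is available, its ring page lives
 * s->coalesced_mmio pages into that same mapping, while the dirty ring
 * (if enabled) is a separate mapping of the vcpu fd at offset
 * PAGE_SIZE * KVM_DIRTY_LOG_PAGE_OFFSET.
 */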

/*
 * dirty pages logging control
 */

static int kvm_mem_flags(MemoryRegion *mr)
{
    bool readonly = mr->readonly || memory_region_is_romd(mr);
    int flags = 0;

    if (memory_region_get_dirty_log_mask(mr) != 0) {
        flags |= KVM_MEM_LOG_DIRTY_PAGES;
    }
    if (readonly && kvm_readonly_mem_allowed) {
        flags |= KVM_MEM_READONLY;
    }
    if (memory_region_has_guest_memfd(mr)) {
        assert(kvm_guest_memfd_supported);
        flags |= KVM_MEM_GUEST_MEMFD;
    }
    return flags;
}

/* Called with KVMMemoryListener.slots_lock held */
static int kvm_slot_update_flags(KVMMemoryListener *kml, KVMSlot *mem,
                                 MemoryRegion *mr)
{
    mem->flags = kvm_mem_flags(mr);

    /* If nothing changed effectively, no need to issue ioctl */
    if (mem->flags == mem->old_flags) {
        return 0;
    }

    kvm_slot_init_dirty_bitmap(mem);
    return kvm_set_user_memory_region(kml, mem, false);
}

static int kvm_section_update_flags(KVMMemoryListener *kml,
                                    MemoryRegionSection *section)
{
    hwaddr start_addr, size, slot_size;
    KVMSlot *mem;
    int ret = 0;

    size = kvm_align_section(section, &start_addr);
    if (!size) {
        return 0;
    }

    kvm_slots_lock();

    while (size && !ret) {
        slot_size = MIN(kvm_max_slot_size, size);
        mem = kvm_lookup_matching_slot(kml, start_addr, slot_size);
        if (!mem) {
            /* We don't have a slot if we want to trap every access. */
            goto out;
        }

        ret = kvm_slot_update_flags(kml, mem, section->mr);
        start_addr += slot_size;
        size -= slot_size;
    }

out:
    kvm_slots_unlock();
    return ret;
}

static void kvm_log_start(MemoryListener *listener,
                          MemoryRegionSection *section,
                          int old, int new)
{
    KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);
    int r;

    if (old != 0) {
        return;
    }

    r = kvm_section_update_flags(kml, section);
    if (r < 0) {
        abort();
    }
}

static void kvm_log_stop(MemoryListener *listener,
                         MemoryRegionSection *section,
                         int old, int new)
{
    KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);
    int r;

    if (new != 0) {
        return;
    }

    r = kvm_section_update_flags(kml, section);
    if (r < 0) {
        abort();
    }
}

/* get kvm's dirty pages bitmap and update qemu's */
static void kvm_slot_sync_dirty_pages(KVMSlot *slot)
{
    ram_addr_t start = slot->ram_start_offset;
    ram_addr_t pages = slot->memory_size / qemu_real_host_page_size();

    cpu_physical_memory_set_dirty_lebitmap(slot->dirty_bmap, start, pages);
}

static void kvm_slot_reset_dirty_pages(KVMSlot *slot)
{
    memset(slot->dirty_bmap, 0, slot->dirty_bmap_size);
}

#define ALIGN(x, y)  (((x)+(y)-1) & ~((y)-1))

/* Allocate the dirty bitmap for a slot */
static void kvm_slot_init_dirty_bitmap(KVMSlot *mem)
{
    if (!(mem->flags & KVM_MEM_LOG_DIRTY_PAGES) || mem->dirty_bmap) {
        return;
    }

    /*
     * XXX bad kernel interface alert
     * For the dirty bitmap, the kernel allocates an array of a size
     * aligned to bits-per-long. But when the kernel is 64-bit and
     * userspace is 32-bit, userspace can't align to the same
     * bits-per-long, since sizeof(long) differs between kernel and
     * user space. Userspace would then provide a buffer that may be
     * 4 bytes smaller than the kernel uses, resulting in userspace
     * memory corruption (which is not detectable by valgrind either,
     * in most cases).
     * So for now, let's align to 64 instead of HOST_LONG_BITS here, in
     * the hope that sizeof(long) won't become >8 any time soon.
     *
     * Note: the granule of the kvm dirty log is qemu_real_host_page_size.
     * And mem->memory_size is aligned to it (otherwise this mem can't
     * be registered to KVM).
     */
    hwaddr bitmap_size = ALIGN(mem->memory_size / qemu_real_host_page_size(),
                               /*HOST_LONG_BITS*/ 64) / 8;
    mem->dirty_bmap = g_malloc0(bitmap_size);
    mem->dirty_bmap_size = bitmap_size;
}
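
/*
 * Size example, assuming 4 KiB host pages: a 1 GiB slot covers 262144
 * pages; ALIGN(262144, 64) / 8 = 32768, so the bitmap costs 32 KiB.
 */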

/*
 * Sync dirty bitmap from kernel to KVMSlot.dirty_bmap, return true if
 * succeeded, false otherwise
 */
static bool kvm_slot_get_dirty_log(KVMState *s, KVMSlot *slot)
{
    struct kvm_dirty_log d = {};
    int ret;

    d.dirty_bitmap = slot->dirty_bmap;
    d.slot = slot->slot | (slot->as_id << 16);
    ret = kvm_vm_ioctl(s, KVM_GET_DIRTY_LOG, &d);

    if (ret == -ENOENT) {
        /* kernel does not have dirty bitmap in this slot */
        ret = 0;
    }
    if (ret) {
        error_report_once("%s: KVM_GET_DIRTY_LOG failed with %d",
                          __func__, ret);
    }
    return ret == 0;
}

/* Should be with all slots_lock held for the address spaces. */
static void kvm_dirty_ring_mark_page(KVMState *s, uint32_t as_id,
                                     uint32_t slot_id, uint64_t offset)
{
    KVMMemoryListener *kml;
    KVMSlot *mem;

    if (as_id >= s->nr_as) {
        return;
    }

    kml = s->as[as_id].ml;
    mem = &kml->slots[slot_id];

    if (!mem->memory_size || offset >=
        (mem->memory_size / qemu_real_host_page_size())) {
        return;
    }

    set_bit(offset, mem->dirty_bmap);
}

static bool dirty_gfn_is_dirtied(struct kvm_dirty_gfn *gfn)
{
    /*
     * Read the flags before the value. Pairs with barrier in
     * KVM's kvm_dirty_ring_push() function.
     */
    return qatomic_load_acquire(&gfn->flags) == KVM_DIRTY_GFN_F_DIRTY;
}

static void dirty_gfn_set_collected(struct kvm_dirty_gfn *gfn)
{
    /*
     * Use a store-release so that the CPU that executes KVM_RESET_DIRTY_RINGS
     * sees the full content of the ring:
     *
     * CPU0                     CPU1                         CPU2
     * ------------------------------------------------------------------------------
     *                                                       fill gfn0
     *                                                       store-rel flags for gfn0
     * load-acq flags for gfn0
     * store-rel RESET for gfn0
     *                          ioctl(RESET_RINGS)
     *                          load-acq flags for gfn0
     *                          check if flags have RESET
     *
     * The synchronization goes from CPU2 to CPU0 to CPU1.
     */
    qatomic_store_release(&gfn->flags, KVM_DIRTY_GFN_F_RESET);
}

/*
 * Should be with all slots_lock held for the address spaces. It returns
 * the number of dirty pages we've collected on this dirty ring.
 */
static uint32_t kvm_dirty_ring_reap_one(KVMState *s, CPUState *cpu)
{
    struct kvm_dirty_gfn *dirty_gfns = cpu->kvm_dirty_gfns, *cur;
    uint32_t ring_size = s->kvm_dirty_ring_size;
    uint32_t count = 0, fetch = cpu->kvm_fetch_index;

    /*
     * It's possible that we race with vcpu creation code where the vcpu is
     * put onto the vcpus list but has not yet initialized the dirty ring
     * structures. If so, skip it.
     */
    if (!cpu->created) {
        return 0;
    }

    assert(dirty_gfns && ring_size);
    trace_kvm_dirty_ring_reap_vcpu(cpu->cpu_index);

    while (true) {
        cur = &dirty_gfns[fetch % ring_size];
        if (!dirty_gfn_is_dirtied(cur)) {
            break;
        }
        kvm_dirty_ring_mark_page(s, cur->slot >> 16, cur->slot & 0xffff,
                                 cur->offset);
        dirty_gfn_set_collected(cur);
        trace_kvm_dirty_ring_page(cpu->cpu_index, fetch, cur->offset);
        fetch++;
        count++;
    }
    cpu->kvm_fetch_index = fetch;
    cpu->dirty_pages += count;

    return count;
}
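
/*
 * Consumption model: cpu->kvm_fetch_index is a free-running counter,
 * reduced modulo the ring size to index the array. Entries are collected
 * strictly in order while their flags read back as DIRTY, and each is
 * marked RESET so a later KVM_RESET_DIRTY_RINGS can re-protect the page.
 */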

/* Must be with slots_lock held */
static uint64_t kvm_dirty_ring_reap_locked(KVMState *s, CPUState *cpu)
{
    int ret;
    uint64_t total = 0;
    int64_t stamp;

    stamp = get_clock();

    if (cpu) {
        total = kvm_dirty_ring_reap_one(s, cpu);
    } else {
        CPU_FOREACH(cpu) {
            total += kvm_dirty_ring_reap_one(s, cpu);
        }
    }

    if (total) {
        ret = kvm_vm_ioctl(s, KVM_RESET_DIRTY_RINGS);
        assert(ret == total);
    }

    stamp = get_clock() - stamp;

    if (total) {
        trace_kvm_dirty_ring_reap(total, stamp / 1000);
    }

    return total;
}

/*
 * Currently for simplicity, we must hold BQL before calling this. We can
 * consider dropping the BQL once we're clear about all the race conditions.
 */
static uint64_t kvm_dirty_ring_reap(KVMState *s, CPUState *cpu)
{
    uint64_t total;

    /*
     * We need to lock all kvm slots for all address spaces here,
     * because:
     *
     * (1) We need to mark dirty for dirty bitmaps in multiple slots
     *     and for tons of pages, so it's better to take the lock here
     *     once rather than once per page. And more importantly,
     *
     * (2) We must _NOT_ publish dirty bits to the other threads
     *     (e.g., the migration thread) via the kvm memory slot dirty
     *     bitmaps before correctly re-protecting those dirtied pages.
     *     Otherwise we run the risk of data corruption if the page
     *     data is read in the other thread before we do the reset
     *     below.
     */
    kvm_slots_lock();
    total = kvm_dirty_ring_reap_locked(s, cpu);
    kvm_slots_unlock();

    return total;
}

static void do_kvm_cpu_synchronize_kick(CPUState *cpu, run_on_cpu_data arg)
{
    /* No need to do anything */
}

/*
 * Kick all vcpus out in a synchronized way. When returned, we
 * guarantee that every vcpu has been kicked and at least returned to
 * userspace once.
 */
static void kvm_cpu_synchronize_kick_all(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        run_on_cpu(cpu, do_kvm_cpu_synchronize_kick, RUN_ON_CPU_NULL);
    }
}

/*
 * Flush all the existing dirty pages to the KVM slot buffers. When
 * this call returns, we guarantee that all the touched dirty pages
 * before calling this function have been put into the per-kvmslot
 * dirty bitmap.
 *
 * This function must be called with BQL held.
 */
static void kvm_dirty_ring_flush(void)
{
    trace_kvm_dirty_ring_flush(0);
    /*
     * The function needs to be serialized. Since this function
     * should always be with BQL held, serialization is guaranteed.
     * However, let's be sure of it.
     */
    assert(bql_locked());
    /*
     * First make sure to flush the hardware buffers by kicking all
     * vcpus out in a synchronous way.
     */
    kvm_cpu_synchronize_kick_all();
    kvm_dirty_ring_reap(kvm_state, NULL);
    trace_kvm_dirty_ring_flush(1);
}

/**
 * kvm_physical_sync_dirty_bitmap - Sync dirty bitmap from kernel space
 *
 * This function will first try to fetch dirty bitmap from the kernel,
 * and then updates qemu's dirty bitmap.
 *
 * NOTE: caller must be with kml->slots_lock held.
 *
 * @kml: the KVM memory listener object
 * @section: the memory section to sync the dirty bitmap with
 */
static void kvm_physical_sync_dirty_bitmap(KVMMemoryListener *kml,
                                           MemoryRegionSection *section)
{
    KVMState *s = kvm_state;
    KVMSlot *mem;
    hwaddr start_addr, size;
    hwaddr slot_size;

    size = kvm_align_section(section, &start_addr);
    while (size) {
        slot_size = MIN(kvm_max_slot_size, size);
        mem = kvm_lookup_matching_slot(kml, start_addr, slot_size);
        if (!mem) {
            /* We don't have a slot if we want to trap every access. */
            return;
        }
        if (kvm_slot_get_dirty_log(s, mem)) {
            kvm_slot_sync_dirty_pages(mem);
        }
        start_addr += slot_size;
        size -= slot_size;
    }
}

/* Alignment requirement for KVM_CLEAR_DIRTY_LOG - 64 pages */
#define KVM_CLEAR_LOG_SHIFT  6
#define KVM_CLEAR_LOG_ALIGN  (qemu_real_host_page_size() << KVM_CLEAR_LOG_SHIFT)
#define KVM_CLEAR_LOG_MASK   (-KVM_CLEAR_LOG_ALIGN)
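
/*
 * With 4 KiB host pages this makes KVM_CLEAR_LOG_ALIGN 256 KiB: clear
 * requests must start on a 64-page boundary (or cover the slot tail).
 */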

static int kvm_log_clear_one_slot(KVMSlot *mem, int as_id, uint64_t start,
                                  uint64_t size)
{
    KVMState *s = kvm_state;
    uint64_t end, bmap_start, start_delta, bmap_npages;
    struct kvm_clear_dirty_log d;
    unsigned long *bmap_clear = NULL, psize = qemu_real_host_page_size();
    int ret;

    /*
     * We need to extend either the start or the size or both to
     * satisfy the KVM interface requirement. Firstly, do the start
     * page alignment on 64 host pages
     */
    bmap_start = start & KVM_CLEAR_LOG_MASK;
    start_delta = start - bmap_start;
    bmap_start /= psize;

    /*
     * The kernel interface has restriction on the size too, that either:
     *
     * (1) the size is 64 host pages aligned (just like the start), or
     * (2) the size fills up until the end of the KVM memslot.
     */
    bmap_npages = DIV_ROUND_UP(size + start_delta, KVM_CLEAR_LOG_ALIGN)
        << KVM_CLEAR_LOG_SHIFT;
    end = mem->memory_size / psize;
    if (bmap_npages > end - bmap_start) {
        bmap_npages = end - bmap_start;
    }
    start_delta /= psize;

    /*
     * Prepare the bitmap to clear dirty bits. Here we must guarantee
     * that we won't clear any unknown dirty bits otherwise we might
     * accidentally clear some set bits which are not yet synced from
     * the kernel into QEMU's bitmap, then we'll lose track of the
     * guest modifications upon those pages (which can directly lead
     * to guest data loss or panic after migration).
     *
     * Layout of the KVMSlot.dirty_bmap:
     *
     *                   |<-------- bmap_npages -----------..>|
     *                                                     [1]
     *                     start_delta         size
     *  |----------------|-------------|------------------|------------|
     *  ^                ^             ^                               ^
     *  |                |             |                               |
     * start          bmap_start     (start)                         end
     * of memslot                                                  of memslot
     *
     * [1] bmap_npages can be aligned to either 64 pages or the end of slot
     */

    assert(bmap_start % BITS_PER_LONG == 0);
    /* We should never do log_clear before log_sync */
    assert(mem->dirty_bmap);
    if (start_delta || bmap_npages - size / psize) {
        /* Slow path - we need to manipulate a temp bitmap */
        bmap_clear = bitmap_new(bmap_npages);
        bitmap_copy_with_src_offset(bmap_clear, mem->dirty_bmap,
                                    bmap_start, start_delta + size / psize);
        /*
         * We need to fill the holes at start because that was not
         * specified by the caller and we extended the bitmap only for
         * 64 pages alignment
         */
        bitmap_clear(bmap_clear, 0, start_delta);
        d.dirty_bitmap = bmap_clear;
    } else {
        /*
         * Fast path - both start and size align well with BITS_PER_LONG
         * (or the end of memory slot)
         */
        d.dirty_bitmap = mem->dirty_bmap + BIT_WORD(bmap_start);
    }

    d.first_page = bmap_start;
    /* It should never overflow. If it happens, say something */
    assert(bmap_npages <= UINT32_MAX);
    d.num_pages = bmap_npages;
    d.slot = mem->slot | (as_id << 16);

    ret = kvm_vm_ioctl(s, KVM_CLEAR_DIRTY_LOG, &d);
    if (ret < 0 && ret != -ENOENT) {
        error_report("%s: KVM_CLEAR_DIRTY_LOG failed, slot=%d, "
                     "start=0x%"PRIx64", size=0x%"PRIx32", errno=%d",
                     __func__, d.slot, (uint64_t)d.first_page,
                     (uint32_t)d.num_pages, ret);
    } else {
        ret = 0;
        trace_kvm_clear_dirty_log(d.slot, d.first_page, d.num_pages);
    }

    /*
     * After we have updated the remote dirty bitmap, we update the
     * cached bitmap as well for the memslot, then if another user
     * clears the same region we know we shouldn't clear it again on
     * the remote otherwise it's data loss as well.
     */
    bitmap_clear(mem->dirty_bmap, bmap_start + start_delta,
                 size / psize);
    /* This handles the NULL case well */
    g_free(bmap_clear);
    return ret;
}
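
/*
 * Worked example, assuming 4 KiB host pages: clearing start = 0x42000,
 * size = 0x3000 yields bmap_start = 0x40000 (page 64) and start_delta = 2
 * pages, so the slow path copies a 64-page window out of dirty_bmap and
 * zeroes its first two bits before issuing KVM_CLEAR_DIRTY_LOG.
 */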


/**
 * kvm_physical_log_clear - Clear the kernel's dirty bitmap for range
 *
 * NOTE: this will be a no-op if we haven't enabled manual dirty log
 * protection in the host kernel because in that case this operation
 * will be done within log_sync().
 *
 * @kml: the kvm memory listener
 * @section: the memory range to clear dirty bitmap
 */
static int kvm_physical_log_clear(KVMMemoryListener *kml,
                                  MemoryRegionSection *section)
{
    KVMState *s = kvm_state;
    uint64_t start, size, offset, count;
    KVMSlot *mem;
    int ret = 0, i;

    if (!s->manual_dirty_log_protect) {
        /* No need to do explicit clear */
        return ret;
    }

    start = section->offset_within_address_space;
    size = int128_get64(section->size);

    if (!size) {
        /* Nothing more we can do... */
        return ret;
    }

    kvm_slots_lock();

    for (i = 0; i < kml->nr_slots_allocated; i++) {
        mem = &kml->slots[i];
        /* Discard slots that are empty or do not overlap the section */
        if (!mem->memory_size ||
            mem->start_addr > start + size - 1 ||
            start > mem->start_addr + mem->memory_size - 1) {
            continue;
        }

        if (start >= mem->start_addr) {
            /* The slot starts before section or is aligned to it. */
            offset = start - mem->start_addr;
            count = MIN(mem->memory_size - offset, size);
        } else {
            /* The slot starts after section. */
            offset = 0;
            count = MIN(mem->memory_size, size - (mem->start_addr - start));
        }
        ret = kvm_log_clear_one_slot(mem, kml->as_id, offset, count);
        if (ret < 0) {
            break;
        }
    }

    kvm_slots_unlock();

    return ret;
}

static void kvm_coalesce_mmio_region(MemoryListener *listener,
                                     MemoryRegionSection *secion,
                                     hwaddr start, hwaddr size)
{
    KVMState *s = kvm_state;

    if (s->coalesced_mmio) {
        struct kvm_coalesced_mmio_zone zone;

        zone.addr = start;
        zone.size = size;
        zone.pad = 0;

        (void)kvm_vm_ioctl(s, KVM_REGISTER_COALESCED_MMIO, &zone);
    }
}

static void kvm_uncoalesce_mmio_region(MemoryListener *listener,
                                       MemoryRegionSection *secion,
                                       hwaddr start, hwaddr size)
{
    KVMState *s = kvm_state;

    if (s->coalesced_mmio) {
        struct kvm_coalesced_mmio_zone zone;

        zone.addr = start;
        zone.size = size;
        zone.pad = 0;

        (void)kvm_vm_ioctl(s, KVM_UNREGISTER_COALESCED_MMIO, &zone);
    }
}

static void kvm_coalesce_pio_add(MemoryListener *listener,
                                 MemoryRegionSection *section,
                                 hwaddr start, hwaddr size)
{
    KVMState *s = kvm_state;

    if (s->coalesced_pio) {
        struct kvm_coalesced_mmio_zone zone;

        zone.addr = start;
        zone.size = size;
        zone.pio = 1;

        (void)kvm_vm_ioctl(s, KVM_REGISTER_COALESCED_MMIO, &zone);
    }
}

static void kvm_coalesce_pio_del(MemoryListener *listener,
                                 MemoryRegionSection *section,
                                 hwaddr start, hwaddr size)
{
    KVMState *s = kvm_state;

    if (s->coalesced_pio) {
        struct kvm_coalesced_mmio_zone zone;

        zone.addr = start;
        zone.size = size;
        zone.pio = 1;

        (void)kvm_vm_ioctl(s, KVM_UNREGISTER_COALESCED_MMIO, &zone);
    }
}

int kvm_check_extension(KVMState *s, unsigned int extension)
{
    int ret;

    ret = kvm_ioctl(s, KVM_CHECK_EXTENSION, extension);
    if (ret < 0) {
        ret = 0;
    }

    return ret;
}

int kvm_vm_check_extension(KVMState *s, unsigned int extension)
{
    int ret;

    ret = kvm_vm_ioctl(s, KVM_CHECK_EXTENSION, extension);
    if (ret < 0) {
        /* VM wide version not implemented, use global one instead */
        ret = kvm_check_extension(s, extension);
    }

    return ret;
}

/*
 * We track the poisoned pages to be able to:
 * - replace them on VM reset
 * - block a migration for a VM with a poisoned page
 */
typedef struct HWPoisonPage {
    ram_addr_t ram_addr;
    QLIST_ENTRY(HWPoisonPage) list;
} HWPoisonPage;

static QLIST_HEAD(, HWPoisonPage) hwpoison_page_list =
    QLIST_HEAD_INITIALIZER(hwpoison_page_list);

static void kvm_unpoison_all(void *param)
{
    HWPoisonPage *page, *next_page;

    QLIST_FOREACH_SAFE(page, &hwpoison_page_list, list, next_page) {
        QLIST_REMOVE(page, list);
        qemu_ram_remap(page->ram_addr);
        g_free(page);
    }
}

void kvm_hwpoison_page_add(ram_addr_t ram_addr)
{
    HWPoisonPage *page;

    QLIST_FOREACH(page, &hwpoison_page_list, list) {
        if (page->ram_addr == ram_addr) {
            return;
        }
    }
    page = g_new(HWPoisonPage, 1);
    page->ram_addr = ram_addr;
    QLIST_INSERT_HEAD(&hwpoison_page_list, page, list);
}

bool kvm_hwpoisoned_mem(void)
{
    return !QLIST_EMPTY(&hwpoison_page_list);
}

static uint32_t adjust_ioeventfd_endianness(uint32_t val, uint32_t size)
{
#if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
    /* The kernel expects ioeventfd values in HOST_BIG_ENDIAN
     * endianness, but the memory core hands them in target endianness.
     * For example, PPC is always treated as big-endian even if running
     * on KVM and on PPC64LE. Correct here.
     */
    switch (size) {
    case 2:
        val = bswap16(val);
        break;
    case 4:
        val = bswap32(val);
        break;
    }
#endif
    return val;
}
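
/*
 * Example, assuming a big-endian target on a little-endian host: a 2-byte
 * datamatch value of 0x1234 from the memory core is byte-swapped to
 * 0x3412 before it is handed to the KVM_IOEVENTFD ioctl.
 */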

static int kvm_set_ioeventfd_mmio(int fd, hwaddr addr, uint32_t val,
                                  bool assign, uint32_t size, bool datamatch)
{
    int ret;
    struct kvm_ioeventfd iofd = {
        .datamatch = datamatch ? adjust_ioeventfd_endianness(val, size) : 0,
        .addr = addr,
        .len = size,
        .flags = 0,
        .fd = fd,
    };

    trace_kvm_set_ioeventfd_mmio(fd, (uint64_t)addr, val, assign, size,
                                 datamatch);
    if (!kvm_enabled()) {
        return -ENOSYS;
    }

    if (datamatch) {
        iofd.flags |= KVM_IOEVENTFD_FLAG_DATAMATCH;
    }
    if (!assign) {
        iofd.flags |= KVM_IOEVENTFD_FLAG_DEASSIGN;
    }

    ret = kvm_vm_ioctl(kvm_state, KVM_IOEVENTFD, &iofd);

    if (ret < 0) {
        return -errno;
    }

    return 0;
}

static int kvm_set_ioeventfd_pio(int fd, uint16_t addr, uint16_t val,
                                 bool assign, uint32_t size, bool datamatch)
{
    struct kvm_ioeventfd kick = {
        .datamatch = datamatch ? adjust_ioeventfd_endianness(val, size) : 0,
        .addr = addr,
        .flags = KVM_IOEVENTFD_FLAG_PIO,
        .len = size,
        .fd = fd,
    };
    int r;
    trace_kvm_set_ioeventfd_pio(fd, addr, val, assign, size, datamatch);
    if (!kvm_enabled()) {
        return -ENOSYS;
    }
    if (datamatch) {
        kick.flags |= KVM_IOEVENTFD_FLAG_DATAMATCH;
    }
    if (!assign) {
        kick.flags |= KVM_IOEVENTFD_FLAG_DEASSIGN;
    }
    r = kvm_vm_ioctl(kvm_state, KVM_IOEVENTFD, &kick);
    if (r < 0) {
        return r;
    }
    return 0;
}


static const KVMCapabilityInfo *
kvm_check_extension_list(KVMState *s, const KVMCapabilityInfo *list)
{
    while (list->name) {
        if (!kvm_check_extension(s, list->value)) {
            return list;
        }
        list++;
    }
    return NULL;
}

void kvm_set_max_memslot_size(hwaddr max_slot_size)
{
    g_assert(
        ROUND_UP(max_slot_size, qemu_real_host_page_size()) == max_slot_size
    );
    kvm_max_slot_size = max_slot_size;
}

static int kvm_set_memory_attributes(hwaddr start, uint64_t size, uint64_t attr)
{
    struct kvm_memory_attributes attrs;
    int r;

    assert((attr & kvm_supported_memory_attributes) == attr);
    attrs.attributes = attr;
    attrs.address = start;
    attrs.size = size;
    attrs.flags = 0;

    r = kvm_vm_ioctl(kvm_state, KVM_SET_MEMORY_ATTRIBUTES, &attrs);
    if (r) {
        error_report("failed to set memory (0x%" HWADDR_PRIx "+0x%" PRIx64 ") "
                     "with attr 0x%" PRIx64 " error '%s'",
                     start, size, attr, strerror(errno));
    }
    return r;
}

int kvm_set_memory_attributes_private(hwaddr start, uint64_t size)
{
    return kvm_set_memory_attributes(start, size, KVM_MEMORY_ATTRIBUTE_PRIVATE);
}

int kvm_set_memory_attributes_shared(hwaddr start, uint64_t size)
{
    return kvm_set_memory_attributes(start, size, 0);
}

/* Called with KVMMemoryListener.slots_lock held */
static void kvm_set_phys_mem(KVMMemoryListener *kml,
                             MemoryRegionSection *section, bool add)
{
    KVMSlot *mem;
    int err;
    MemoryRegion *mr = section->mr;
    bool writable = !mr->readonly && !mr->rom_device;
    hwaddr start_addr, size, slot_size, mr_offset;
    ram_addr_t ram_start_offset;
    void *ram;

    if (!memory_region_is_ram(mr)) {
        if (writable || !kvm_readonly_mem_allowed) {
            return;
        } else if (!mr->romd_mode) {
            /* If the memory device is not in romd_mode, then we actually want
             * to remove the kvm memory slot so all accesses will trap. */
            add = false;
        }
    }

    size = kvm_align_section(section, &start_addr);
    if (!size) {
        return;
    }

    /* The offset of the kvmslot within the memory region */
    mr_offset = section->offset_within_region + start_addr -
        section->offset_within_address_space;

    /* use aligned delta to align the ram address and offset */
    ram = memory_region_get_ram_ptr(mr) + mr_offset;
    ram_start_offset = memory_region_get_ram_addr(mr) + mr_offset;

    if (!add) {
        do {
            slot_size = MIN(kvm_max_slot_size, size);
            mem = kvm_lookup_matching_slot(kml, start_addr, slot_size);
            if (!mem) {
                return;
            }
            if (mem->flags & KVM_MEM_LOG_DIRTY_PAGES) {
                /*
                 * NOTE: We should be aware of the fact that here we're only
                 * doing a best effort to sync dirty bits. No matter whether
                 * we're using dirty log or dirty ring, we ignored two facts:
                 *
                 * (1) dirty bits can reside in hardware buffers (PML)
                 *
                 * (2) after we collected dirty bits here, pages can be dirtied
                 * again before we do the final KVM_SET_USER_MEMORY_REGION to
                 * remove the slot.
                 *
                 * Not easy. Let's keep our fingers crossed until it's fixed.
                 */
                if (kvm_state->kvm_dirty_ring_size) {
                    kvm_dirty_ring_reap_locked(kvm_state, NULL);
                    if (kvm_state->kvm_dirty_ring_with_bitmap) {
                        kvm_slot_sync_dirty_pages(mem);
                        kvm_slot_get_dirty_log(kvm_state, mem);
                    }
                } else {
                    kvm_slot_get_dirty_log(kvm_state, mem);
                }
                kvm_slot_sync_dirty_pages(mem);
            }

            /* unregister the slot */
            g_free(mem->dirty_bmap);
            mem->dirty_bmap = NULL;
            mem->memory_size = 0;
            mem->flags = 0;
            err = kvm_set_user_memory_region(kml, mem, false);
            if (err) {
                fprintf(stderr, "%s: error unregistering slot: %s\n",
                        __func__, strerror(-err));
                abort();
            }
            start_addr += slot_size;
            size -= slot_size;
            kml->nr_slots_used--;
        } while (size);
        return;
    }

    /* register the new slot */
    do {
        slot_size = MIN(kvm_max_slot_size, size);
        mem = kvm_alloc_slot(kml);
        mem->as_id = kml->as_id;
        mem->memory_size = slot_size;
        mem->start_addr = start_addr;
        mem->ram_start_offset = ram_start_offset;
        mem->ram = ram;
        mem->flags = kvm_mem_flags(mr);
        mem->guest_memfd = mr->ram_block->guest_memfd;
        mem->guest_memfd_offset = (uint8_t*)ram - mr->ram_block->host;

        kvm_slot_init_dirty_bitmap(mem);
        err = kvm_set_user_memory_region(kml, mem, true);
        if (err) {
            fprintf(stderr, "%s: error registering slot: %s\n", __func__,
                    strerror(-err));
            abort();
        }

        if (memory_region_has_guest_memfd(mr)) {
            err = kvm_set_memory_attributes_private(start_addr, slot_size);
            if (err) {
                error_report("%s: failed to set memory attribute private: %s",
                             __func__, strerror(-err));
                exit(1);
            }
        }

        start_addr += slot_size;
        ram_start_offset += slot_size;
        ram += slot_size;
        size -= slot_size;
        kml->nr_slots_used++;
    } while (size);
}
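
/*
 * Sections larger than kvm_max_slot_size are split into multiple memslots
 * above; e.g. with a 4 GiB cap an 11 GiB section is registered as three
 * slots of 4 GiB, 4 GiB and 3 GiB, each iteration advancing start_addr,
 * the ram pointer and ram_start_offset by slot_size.
 */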

static void *kvm_dirty_ring_reaper_thread(void *data)
{
    KVMState *s = data;
    struct KVMDirtyRingReaper *r = &s->reaper;

    rcu_register_thread();

    trace_kvm_dirty_ring_reaper("init");

    while (true) {
        r->reaper_state = KVM_DIRTY_RING_REAPER_WAIT;
        trace_kvm_dirty_ring_reaper("wait");
        /*
         * TODO: provide a smarter timeout rather than a constant?
         */
        sleep(1);

        /* keep sleeping so that the reaper doesn't interfere with dirtylimit */
        if (dirtylimit_in_service()) {
            continue;
        }

        trace_kvm_dirty_ring_reaper("wakeup");
        r->reaper_state = KVM_DIRTY_RING_REAPER_REAPING;

        bql_lock();
        kvm_dirty_ring_reap(s, NULL);
        bql_unlock();

        r->reaper_iteration++;
    }

    g_assert_not_reached();
}

static void kvm_dirty_ring_reaper_init(KVMState *s)
{
    struct KVMDirtyRingReaper *r = &s->reaper;

    qemu_thread_create(&r->reaper_thr, "kvm-reaper",
                       kvm_dirty_ring_reaper_thread,
                       s, QEMU_THREAD_JOINABLE);
}

static int kvm_dirty_ring_init(KVMState *s)
{
    uint32_t ring_size = s->kvm_dirty_ring_size;
    uint64_t ring_bytes = ring_size * sizeof(struct kvm_dirty_gfn);
    unsigned int capability = KVM_CAP_DIRTY_LOG_RING;
    int ret;

    s->kvm_dirty_ring_size = 0;
    s->kvm_dirty_ring_bytes = 0;

    /* Bail if the dirty ring size isn't specified */
    if (!ring_size) {
        return 0;
    }

    /*
     * Read the max supported pages. Fall back to dirty logging mode
     * if the dirty ring isn't supported.
     */
    ret = kvm_vm_check_extension(s, capability);
    if (ret <= 0) {
        capability = KVM_CAP_DIRTY_LOG_RING_ACQ_REL;
        ret = kvm_vm_check_extension(s, capability);
    }

    if (ret <= 0) {
        warn_report("KVM dirty ring not available, using bitmap method");
        return 0;
    }

    if (ring_bytes > ret) {
        error_report("KVM dirty ring size %" PRIu32 " too big "
                     "(maximum is %ld). Please use a smaller value.",
                     ring_size, (long)ret / sizeof(struct kvm_dirty_gfn));
        return -EINVAL;
    }

    ret = kvm_vm_enable_cap(s, capability, 0, ring_bytes);
    if (ret) {
        error_report("Enabling of KVM dirty ring failed: %s. "
                     "Suggested minimum value is 1024.", strerror(-ret));
        return -EIO;
    }

    /* Enable the backup bitmap if it is supported */
    ret = kvm_vm_check_extension(s, KVM_CAP_DIRTY_LOG_RING_WITH_BITMAP);
    if (ret > 0) {
        ret = kvm_vm_enable_cap(s, KVM_CAP_DIRTY_LOG_RING_WITH_BITMAP, 0);
        if (ret) {
            error_report("Enabling of KVM dirty ring's backup bitmap failed: "
                         "%s. ", strerror(-ret));
            return -EIO;
        }

        s->kvm_dirty_ring_with_bitmap = true;
    }

    s->kvm_dirty_ring_size = ring_size;
    s->kvm_dirty_ring_bytes = ring_bytes;

    return 0;
}
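
/*
 * Sizing example: struct kvm_dirty_gfn is 16 bytes in linux/kvm.h
 * (__u32 flags, __u32 slot, __u64 offset), so a ring of 4096 entries
 * maps 64 KiB of dirty-ring memory per vcpu.
 */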
1677
kvm_region_add(MemoryListener * listener,MemoryRegionSection * section)1678 static void kvm_region_add(MemoryListener *listener,
1679 MemoryRegionSection *section)
1680 {
1681 KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);
1682 KVMMemoryUpdate *update;
1683
1684 update = g_new0(KVMMemoryUpdate, 1);
1685 update->section = *section;
1686
1687 QSIMPLEQ_INSERT_TAIL(&kml->transaction_add, update, next);
1688 }
1689
kvm_region_del(MemoryListener * listener,MemoryRegionSection * section)1690 static void kvm_region_del(MemoryListener *listener,
1691 MemoryRegionSection *section)
1692 {
1693 KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);
1694 KVMMemoryUpdate *update;
1695
1696 update = g_new0(KVMMemoryUpdate, 1);
1697 update->section = *section;
1698
1699 QSIMPLEQ_INSERT_TAIL(&kml->transaction_del, update, next);
1700 }
1701
kvm_region_commit(MemoryListener * listener)1702 static void kvm_region_commit(MemoryListener *listener)
1703 {
1704 KVMMemoryListener *kml = container_of(listener, KVMMemoryListener,
1705 listener);
1706 KVMMemoryUpdate *u1, *u2;
1707 bool need_inhibit = false;
1708
1709 if (QSIMPLEQ_EMPTY(&kml->transaction_add) &&
1710 QSIMPLEQ_EMPTY(&kml->transaction_del)) {
1711 return;
1712 }
1713
1714 /*
1715 * We have to be careful when regions to add overlap with ranges to remove.
1716 * We have to simulate atomic KVM memslot updates by making sure no ioctl()
1717 * is currently active.
1718 *
1719 * The lists are order by addresses, so it's easy to find overlaps.
1720 */
1721 u1 = QSIMPLEQ_FIRST(&kml->transaction_del);
1722 u2 = QSIMPLEQ_FIRST(&kml->transaction_add);
1723 while (u1 && u2) {
1724 Range r1, r2;
1725
1726 range_init_nofail(&r1, u1->section.offset_within_address_space,
1727 int128_get64(u1->section.size));
1728 range_init_nofail(&r2, u2->section.offset_within_address_space,
1729 int128_get64(u2->section.size));
1730
1731 if (range_overlaps_range(&r1, &r2)) {
1732 need_inhibit = true;
1733 break;
1734 }
1735 if (range_lob(&r1) < range_lob(&r2)) {
1736 u1 = QSIMPLEQ_NEXT(u1, next);
1737 } else {
1738 u2 = QSIMPLEQ_NEXT(u2, next);
1739 }
1740 }
1741
1742 kvm_slots_lock();
1743 if (need_inhibit) {
1744 accel_ioctl_inhibit_begin();
1745 }
1746
1747 /* Remove all memslots before adding the new ones. */
1748 while (!QSIMPLEQ_EMPTY(&kml->transaction_del)) {
1749 u1 = QSIMPLEQ_FIRST(&kml->transaction_del);
1750 QSIMPLEQ_REMOVE_HEAD(&kml->transaction_del, next);
1751
1752 kvm_set_phys_mem(kml, &u1->section, false);
1753 memory_region_unref(u1->section.mr);
1754
1755 g_free(u1);
1756 }
1757 while (!QSIMPLEQ_EMPTY(&kml->transaction_add)) {
1758 u1 = QSIMPLEQ_FIRST(&kml->transaction_add);
1759 QSIMPLEQ_REMOVE_HEAD(&kml->transaction_add, next);
1760
1761 memory_region_ref(u1->section.mr);
1762 kvm_set_phys_mem(kml, &u1->section, true);
1763
1764 g_free(u1);
1765 }
1766
1767 if (need_inhibit) {
1768 accel_ioctl_inhibit_end();
1769 }
1770 kvm_slots_unlock();
1771 }
1772
kvm_log_sync(MemoryListener * listener,MemoryRegionSection * section)1773 static void kvm_log_sync(MemoryListener *listener,
1774 MemoryRegionSection *section)
1775 {
1776 KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);
1777
1778 kvm_slots_lock();
1779 kvm_physical_sync_dirty_bitmap(kml, section);
1780 kvm_slots_unlock();
1781 }
1782
kvm_log_sync_global(MemoryListener * l,bool last_stage)1783 static void kvm_log_sync_global(MemoryListener *l, bool last_stage)
1784 {
1785 KVMMemoryListener *kml = container_of(l, KVMMemoryListener, listener);
1786 KVMState *s = kvm_state;
1787 KVMSlot *mem;
1788 int i;
1789
1790 /* Flush all kernel dirty addresses into KVMSlot dirty bitmap */
1791 kvm_dirty_ring_flush();
1792
1793 kvm_slots_lock();
1794 for (i = 0; i < kml->nr_slots_allocated; i++) {
1795 mem = &kml->slots[i];
1796 if (mem->memory_size && mem->flags & KVM_MEM_LOG_DIRTY_PAGES) {
1797 kvm_slot_sync_dirty_pages(mem);
1798
1799 if (s->kvm_dirty_ring_with_bitmap && last_stage &&
1800 kvm_slot_get_dirty_log(s, mem)) {
1801 kvm_slot_sync_dirty_pages(mem);
1802 }
1803
1804 /*
1805 * This is not needed by KVM_GET_DIRTY_LOG because the
1806 * ioctl will unconditionally overwrite the whole region.
1807 * However kvm dirty ring has no such side effect.
1808 */
1809 kvm_slot_reset_dirty_pages(mem);
1810 }
1811 }
1812 kvm_slots_unlock();
1813 }
1814
kvm_log_clear(MemoryListener * listener,MemoryRegionSection * section)1815 static void kvm_log_clear(MemoryListener *listener,
1816 MemoryRegionSection *section)
1817 {
1818 KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);
1819 int r;
1820
1821 r = kvm_physical_log_clear(kml, section);
1822 if (r < 0) {
1823 error_report_once("%s: kvm log clear failed: mr=%s "
1824 "offset=%"HWADDR_PRIx" size=%"PRIx64, __func__,
1825 section->mr->name, section->offset_within_region,
1826 int128_get64(section->size));
1827 abort();
1828 }
1829 }
1830
kvm_mem_ioeventfd_add(MemoryListener * listener,MemoryRegionSection * section,bool match_data,uint64_t data,EventNotifier * e)1831 static void kvm_mem_ioeventfd_add(MemoryListener *listener,
1832 MemoryRegionSection *section,
1833 bool match_data, uint64_t data,
1834 EventNotifier *e)
1835 {
1836 int fd = event_notifier_get_fd(e);
1837 int r;
1838
1839 r = kvm_set_ioeventfd_mmio(fd, section->offset_within_address_space,
1840 data, true, int128_get64(section->size),
1841 match_data);
1842 if (r < 0) {
1843 fprintf(stderr, "%s: error adding ioeventfd: %s (%d)\n",
1844 __func__, strerror(-r), -r);
1845 abort();
1846 }
1847 }
1848
kvm_mem_ioeventfd_del(MemoryListener * listener,MemoryRegionSection * section,bool match_data,uint64_t data,EventNotifier * e)1849 static void kvm_mem_ioeventfd_del(MemoryListener *listener,
1850 MemoryRegionSection *section,
1851 bool match_data, uint64_t data,
1852 EventNotifier *e)
1853 {
1854 int fd = event_notifier_get_fd(e);
1855 int r;
1856
1857 r = kvm_set_ioeventfd_mmio(fd, section->offset_within_address_space,
1858 data, false, int128_get64(section->size),
1859 match_data);
1860 if (r < 0) {
1861 fprintf(stderr, "%s: error deleting ioeventfd: %s (%d)\n",
1862 __func__, strerror(-r), -r);
1863 abort();
1864 }
1865 }
1866
kvm_io_ioeventfd_add(MemoryListener * listener,MemoryRegionSection * section,bool match_data,uint64_t data,EventNotifier * e)1867 static void kvm_io_ioeventfd_add(MemoryListener *listener,
1868 MemoryRegionSection *section,
1869 bool match_data, uint64_t data,
1870 EventNotifier *e)
1871 {
1872 int fd = event_notifier_get_fd(e);
1873 int r;
1874
1875 r = kvm_set_ioeventfd_pio(fd, section->offset_within_address_space,
1876 data, true, int128_get64(section->size),
1877 match_data);
1878 if (r < 0) {
1879 fprintf(stderr, "%s: error adding ioeventfd: %s (%d)\n",
1880 __func__, strerror(-r), -r);
1881 abort();
1882 }
1883 }
1884
1885 static void kvm_io_ioeventfd_del(MemoryListener *listener,
1886 MemoryRegionSection *section,
1887 bool match_data, uint64_t data,
1888 EventNotifier *e)
1889
1890 {
1891 int fd = event_notifier_get_fd(e);
1892 int r;
1893
1894 r = kvm_set_ioeventfd_pio(fd, section->offset_within_address_space,
1895 data, false, int128_get64(section->size),
1896 match_data);
1897 if (r < 0) {
1898 fprintf(stderr, "%s: error deleting ioeventfd: %s (%d)\n",
1899 __func__, strerror(-r), -r);
1900 abort();
1901 }
1902 }
1903
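/*
 * Wire up the KVM accelerator callbacks on a KVMMemoryListener and
 * register it on @as.  Dirty tracking is hooked up in one of two ways:
 * with a dirty ring, log_sync_global flushes the per-vCPU rings; without
 * one, the per-section log_sync/log_clear pair drives KVM_GET_DIRTY_LOG
 * and KVM_CLEAR_DIRTY_LOG.
 */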
1904 void kvm_memory_listener_register(KVMState *s, KVMMemoryListener *kml,
1905 AddressSpace *as, int as_id, const char *name)
1906 {
1907 int i;
1908
1909 kml->as_id = as_id;
1910
1911 kvm_slots_grow(kml, KVM_MEMSLOTS_NR_ALLOC_DEFAULT);
1912
1913 QSIMPLEQ_INIT(&kml->transaction_add);
1914 QSIMPLEQ_INIT(&kml->transaction_del);
1915
1916 kml->listener.region_add = kvm_region_add;
1917 kml->listener.region_del = kvm_region_del;
1918 kml->listener.commit = kvm_region_commit;
1919 kml->listener.log_start = kvm_log_start;
1920 kml->listener.log_stop = kvm_log_stop;
1921 kml->listener.priority = MEMORY_LISTENER_PRIORITY_ACCEL;
1922 kml->listener.name = name;
1923
1924 if (s->kvm_dirty_ring_size) {
1925 kml->listener.log_sync_global = kvm_log_sync_global;
1926 } else {
1927 kml->listener.log_sync = kvm_log_sync;
1928 kml->listener.log_clear = kvm_log_clear;
1929 }
1930
1931 memory_listener_register(&kml->listener, as);
1932
1933 for (i = 0; i < s->nr_as; ++i) {
1934 if (!s->as[i].as) {
1935 s->as[i].as = as;
1936 s->as[i].ml = kml;
1937 break;
1938 }
1939 }
1940 }
1941
1942 static MemoryListener kvm_io_listener = {
1943 .name = "kvm-io",
1944 .coalesced_io_add = kvm_coalesce_pio_add,
1945 .coalesced_io_del = kvm_coalesce_pio_del,
1946 .eventfd_add = kvm_io_ioeventfd_add,
1947 .eventfd_del = kvm_io_ioeventfd_del,
1948 .priority = MEMORY_LISTENER_PRIORITY_DEV_BACKEND,
1949 };
1950
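/*
 * Set the level of an in-kernel irqchip input line.  Returns the status
 * reported by KVM_IRQ_LINE_STATUS when available; with the older
 * KVM_IRQ_LINE ioctl delivery is simply assumed and 1 is returned.
 */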
1951 int kvm_set_irq(KVMState *s, int irq, int level)
1952 {
1953 struct kvm_irq_level event;
1954 int ret;
1955
1956 assert(kvm_async_interrupts_enabled());
1957
1958 event.level = level;
1959 event.irq = irq;
1960 ret = kvm_vm_ioctl(s, s->irq_set_ioctl, &event);
1961 if (ret < 0) {
1962 perror("kvm_set_irq");
1963 abort();
1964 }
1965
1966 return (s->irq_set_ioctl == KVM_IRQ_LINE) ? 1 : event.status;
1967 }
1968
1969 #ifdef KVM_CAP_IRQ_ROUTING
1970 typedef struct KVMMSIRoute {
1971 struct kvm_irq_routing_entry kroute;
1972 QTAILQ_ENTRY(KVMMSIRoute) entry;
1973 } KVMMSIRoute;
1974
1975 static void set_gsi(KVMState *s, unsigned int gsi)
1976 {
1977 set_bit(gsi, s->used_gsi_bitmap);
1978 }
1979
1980 static void clear_gsi(KVMState *s, unsigned int gsi)
1981 {
1982 clear_bit(gsi, s->used_gsi_bitmap);
1983 }
1984
1985 void kvm_init_irq_routing(KVMState *s)
1986 {
1987 int gsi_count;
1988
1989 gsi_count = kvm_check_extension(s, KVM_CAP_IRQ_ROUTING) - 1;
1990 if (gsi_count > 0) {
1991 /* Round up so we can search ints using ffs */
1992 s->used_gsi_bitmap = bitmap_new(gsi_count);
1993 s->gsi_count = gsi_count;
1994 }
1995
1996 s->irq_routes = g_malloc0(sizeof(*s->irq_routes));
1997 s->nr_allocated_irq_routes = 0;
1998
1999 kvm_arch_init_irq_routing(s);
2000 }
2001
2002 void kvm_irqchip_commit_routes(KVMState *s)
2003 {
2004 int ret;
2005
2006 if (kvm_gsi_direct_mapping()) {
2007 return;
2008 }
2009
2010 if (!kvm_gsi_routing_enabled()) {
2011 return;
2012 }
2013
2014 s->irq_routes->flags = 0;
2015 trace_kvm_irqchip_commit_routes();
2016 ret = kvm_vm_ioctl(s, KVM_SET_GSI_ROUTING, s->irq_routes);
2017 assert(ret == 0);
2018 }
2019
2020 void kvm_add_routing_entry(KVMState *s,
2021 struct kvm_irq_routing_entry *entry)
2022 {
2023 struct kvm_irq_routing_entry *new;
2024 int n, size;
2025
2026 if (s->irq_routes->nr == s->nr_allocated_irq_routes) {
2027 n = s->nr_allocated_irq_routes * 2;
2028 if (n < 64) {
2029 n = 64;
2030 }
2031 size = sizeof(struct kvm_irq_routing);
2032 size += n * sizeof(*new);
2033 s->irq_routes = g_realloc(s->irq_routes, size);
2034 s->nr_allocated_irq_routes = n;
2035 }
2036 n = s->irq_routes->nr++;
2037 new = &s->irq_routes->entries[n];
2038
2039 *new = *entry;
2040
2041 set_gsi(s, entry->gsi);
2042 }
2043
2044 static int kvm_update_routing_entry(KVMState *s,
2045 struct kvm_irq_routing_entry *new_entry)
2046 {
2047 struct kvm_irq_routing_entry *entry;
2048 int n;
2049
2050 for (n = 0; n < s->irq_routes->nr; n++) {
2051 entry = &s->irq_routes->entries[n];
2052 if (entry->gsi != new_entry->gsi) {
2053 continue;
2054 }
2055
2056 if (!memcmp(entry, new_entry, sizeof *entry)) {
2057 return 0;
2058 }
2059
2060 *entry = *new_entry;
2061
2062 return 0;
2063 }
2064
2065 return -ESRCH;
2066 }
2067
2068 void kvm_irqchip_add_irq_route(KVMState *s, int irq, int irqchip, int pin)
2069 {
2070 struct kvm_irq_routing_entry e = {};
2071
2072 assert(pin < s->gsi_count);
2073
2074 e.gsi = irq;
2075 e.type = KVM_IRQ_ROUTING_IRQCHIP;
2076 e.flags = 0;
2077 e.u.irqchip.irqchip = irqchip;
2078 e.u.irqchip.pin = pin;
2079 kvm_add_routing_entry(s, &e);
2080 }
2081
2082 void kvm_irqchip_release_virq(KVMState *s, int virq)
2083 {
2084 struct kvm_irq_routing_entry *e;
2085 int i;
2086
2087 if (kvm_gsi_direct_mapping()) {
2088 return;
2089 }
2090
2091 for (i = 0; i < s->irq_routes->nr; i++) {
2092 e = &s->irq_routes->entries[i];
2093 if (e->gsi == virq) {
2094 s->irq_routes->nr--;
2095 *e = s->irq_routes->entries[s->irq_routes->nr];
2096 }
2097 }
2098 clear_gsi(s, virq);
2099 kvm_arch_release_virq_post(virq);
2100 trace_kvm_irqchip_release_virq(virq);
2101 }
2102
2103 void kvm_irqchip_add_change_notifier(Notifier *n)
2104 {
2105 notifier_list_add(&kvm_irqchip_change_notifiers, n);
2106 }
2107
2108 void kvm_irqchip_remove_change_notifier(Notifier *n)
2109 {
2110 notifier_remove(n);
2111 }
2112
2113 void kvm_irqchip_change_notify(void)
2114 {
2115 notifier_list_notify(&kvm_irqchip_change_notifiers, NULL);
2116 }
2117
2118 int kvm_irqchip_get_virq(KVMState *s)
2119 {
2120 int next_virq;
2121
2122 /* Return the lowest unused GSI in the bitmap */
2123 next_virq = find_first_zero_bit(s->used_gsi_bitmap, s->gsi_count);
2124 if (next_virq >= s->gsi_count) {
2125 return -ENOSPC;
2126 } else {
2127 return next_virq;
2128 }
2129 }
2130
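/*
 * Inject a single MSI directly via KVM_SIGNAL_MSI.  Unlike
 * kvm_irqchip_add_msi_route(), this consumes no routing table entry,
 * so it suits one-off deliveries.
 */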
2131 int kvm_irqchip_send_msi(KVMState *s, MSIMessage msg)
2132 {
2133 struct kvm_msi msi;
2134
2135 msi.address_lo = (uint32_t)msg.address;
2136 msi.address_hi = msg.address >> 32;
2137 msi.data = le32_to_cpu(msg.data);
2138 msi.flags = 0;
2139 memset(msi.pad, 0, sizeof(msi.pad));
2140
2141 return kvm_vm_ioctl(s, KVM_SIGNAL_MSI, &msi);
2142 }
2143
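/*
 * Allocate a GSI and install an MSI route for @vector of @dev.  The
 * route only reaches the kernel once the route change is committed.
 * A typical caller looks like the sketch below (assuming the
 * kvm_irqchip_begin_route_changes()/kvm_irqchip_commit_route_changes()
 * helpers declared in the public KVM header):
 *
 *     KVMRouteChange c = kvm_irqchip_begin_route_changes(kvm_state);
 *     virq = kvm_irqchip_add_msi_route(&c, vector, dev);
 *     kvm_irqchip_commit_route_changes(&c);
 *
 * Returns the allocated virq, or a negative errno on failure.
 */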
2144 int kvm_irqchip_add_msi_route(KVMRouteChange *c, int vector, PCIDevice *dev)
2145 {
2146 struct kvm_irq_routing_entry kroute = {};
2147 int virq;
2148 KVMState *s = c->s;
2149 MSIMessage msg = {0, 0};
2150
2151 if (pci_available && dev) {
2152 msg = pci_get_msi_message(dev, vector);
2153 }
2154
2155 if (kvm_gsi_direct_mapping()) {
2156 return kvm_arch_msi_data_to_gsi(msg.data);
2157 }
2158
2159 if (!kvm_gsi_routing_enabled()) {
2160 return -ENOSYS;
2161 }
2162
2163 virq = kvm_irqchip_get_virq(s);
2164 if (virq < 0) {
2165 return virq;
2166 }
2167
2168 kroute.gsi = virq;
2169 kroute.type = KVM_IRQ_ROUTING_MSI;
2170 kroute.flags = 0;
2171 kroute.u.msi.address_lo = (uint32_t)msg.address;
2172 kroute.u.msi.address_hi = msg.address >> 32;
2173 kroute.u.msi.data = le32_to_cpu(msg.data);
2174 if (pci_available && kvm_msi_devid_required()) {
2175 kroute.flags = KVM_MSI_VALID_DEVID;
2176 kroute.u.msi.devid = pci_requester_id(dev);
2177 }
2178 if (kvm_arch_fixup_msi_route(&kroute, msg.address, msg.data, dev)) {
2179 kvm_irqchip_release_virq(s, virq);
2180 return -EINVAL;
2181 }
2182
2183 if (s->irq_routes->nr < s->gsi_count) {
2184 trace_kvm_irqchip_add_msi_route(dev ? dev->name : (char *)"N/A",
2185 vector, virq);
2186
2187 kvm_add_routing_entry(s, &kroute);
2188 kvm_arch_add_msi_route_post(&kroute, vector, dev);
2189 c->changes++;
2190 } else {
2191 kvm_irqchip_release_virq(s, virq);
2192 return -ENOSPC;
2193 }
2194
2195 return virq;
2196 }
2197
2198 int kvm_irqchip_update_msi_route(KVMState *s, int virq, MSIMessage msg,
2199 PCIDevice *dev)
2200 {
2201 struct kvm_irq_routing_entry kroute = {};
2202
2203 if (kvm_gsi_direct_mapping()) {
2204 return 0;
2205 }
2206
2207 if (!kvm_irqchip_in_kernel()) {
2208 return -ENOSYS;
2209 }
2210
2211 kroute.gsi = virq;
2212 kroute.type = KVM_IRQ_ROUTING_MSI;
2213 kroute.flags = 0;
2214 kroute.u.msi.address_lo = (uint32_t)msg.address;
2215 kroute.u.msi.address_hi = msg.address >> 32;
2216 kroute.u.msi.data = le32_to_cpu(msg.data);
2217 if (pci_available && kvm_msi_devid_required()) {
2218 kroute.flags = KVM_MSI_VALID_DEVID;
2219 kroute.u.msi.devid = pci_requester_id(dev);
2220 }
2221 if (kvm_arch_fixup_msi_route(&kroute, msg.address, msg.data, dev)) {
2222 return -EINVAL;
2223 }
2224
2225 trace_kvm_irqchip_update_msi_route(virq);
2226
2227 return kvm_update_routing_entry(s, &kroute);
2228 }
2229
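/*
 * (De)assign an irqfd (KVM_IRQFD) that fires @virq when @event is
 * signalled.  For level-triggered interrupts a resample notifier can be
 * supplied: with a fully in-kernel irqchip it is handed to KVM as a
 * resamplefd, while with a split irqchip userspace tracks it and kicks
 * it on EOI (see the comment inside).
 */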
2230 static int kvm_irqchip_assign_irqfd(KVMState *s, EventNotifier *event,
2231 EventNotifier *resample, int virq,
2232 bool assign)
2233 {
2234 int fd = event_notifier_get_fd(event);
2235 int rfd = resample ? event_notifier_get_fd(resample) : -1;
2236
2237 struct kvm_irqfd irqfd = {
2238 .fd = fd,
2239 .gsi = virq,
2240 .flags = assign ? 0 : KVM_IRQFD_FLAG_DEASSIGN,
2241 };
2242
2243 if (rfd != -1) {
2244 assert(assign);
2245 if (kvm_irqchip_is_split()) {
2246 /*
2247 * When the slow irqchip (e.g. IOAPIC) lives in
2248 * userspace, the KVM kernel resamplefd will not work because
2249 * the EOI of the interrupt is delivered to userspace
2250 * instead, so the kernel resamplefd kick is
2251 * skipped. Userspace here mimics what the kernel
2252 * provides with resamplefd: remember the resamplefd and
2253 * kick it when we receive the EOI of this IRQ.
2254 *
2255 * This is hackery because the IOAPIC is mostly bypassed
2256 * (except for EOI broadcasts) when irqfd is used. However,
2257 * it brings back much of the performance for split irqchip
2258 * with INTx IRQs (for VFIO, this gives 93% of the full
2259 * fast path performance, a 46% boost compared to
2260 * the INTx slow path).
2261 */
2262 kvm_resample_fd_insert(virq, resample);
2263 } else {
2264 irqfd.flags |= KVM_IRQFD_FLAG_RESAMPLE;
2265 irqfd.resamplefd = rfd;
2266 }
2267 } else if (!assign) {
2268 if (kvm_irqchip_is_split()) {
2269 kvm_resample_fd_remove(virq);
2270 }
2271 }
2272
2273 return kvm_vm_ioctl(s, KVM_IRQFD, &irqfd);
2274 }
2275
2276 #else /* !KVM_CAP_IRQ_ROUTING */
2277
2278 void kvm_init_irq_routing(KVMState *s)
2279 {
2280 }
2281
2282 void kvm_irqchip_release_virq(KVMState *s, int virq)
2283 {
2284 }
2285
2286 int kvm_irqchip_send_msi(KVMState *s, MSIMessage msg)
2287 {
2288 abort();
2289 }
2290
2291 int kvm_irqchip_add_msi_route(KVMRouteChange *c, int vector, PCIDevice *dev)
2292 {
2293 return -ENOSYS;
2294 }
2295
2296 int kvm_irqchip_add_adapter_route(KVMState *s, AdapterInfo *adapter)
2297 {
2298 return -ENOSYS;
2299 }
2300
2301 int kvm_irqchip_add_hv_sint_route(KVMState *s, uint32_t vcpu, uint32_t sint)
2302 {
2303 return -ENOSYS;
2304 }
2305
2306 static int kvm_irqchip_assign_irqfd(KVMState *s, EventNotifier *event,
2307 EventNotifier *resample, int virq,
2308 bool assign)
2309 {
2310 abort();
2311 }
2312
2313 int kvm_irqchip_update_msi_route(KVMState *s, int virq, MSIMessage msg)
2314 {
2315 return -ENOSYS;
2316 }
2317 #endif /* !KVM_CAP_IRQ_ROUTING */
2318
2319 int kvm_irqchip_add_irqfd_notifier_gsi(KVMState *s, EventNotifier *n,
2320 EventNotifier *rn, int virq)
2321 {
2322 return kvm_irqchip_assign_irqfd(s, n, rn, virq, true);
2323 }
2324
2325 int kvm_irqchip_remove_irqfd_notifier_gsi(KVMState *s, EventNotifier *n,
2326 int virq)
2327 {
2328 return kvm_irqchip_assign_irqfd(s, n, NULL, virq, false);
2329 }
2330
2331 int kvm_irqchip_add_irqfd_notifier(KVMState *s, EventNotifier *n,
2332 EventNotifier *rn, qemu_irq irq)
2333 {
2334 gpointer key, gsi;
2335 gboolean found = g_hash_table_lookup_extended(s->gsimap, irq, &key, &gsi);
2336
2337 if (!found) {
2338 return -ENXIO;
2339 }
2340 return kvm_irqchip_add_irqfd_notifier_gsi(s, n, rn, GPOINTER_TO_INT(gsi));
2341 }
2342
2343 int kvm_irqchip_remove_irqfd_notifier(KVMState *s, EventNotifier *n,
2344 qemu_irq irq)
2345 {
2346 gpointer key, gsi;
2347 gboolean found = g_hash_table_lookup_extended(s->gsimap, irq, &key, &gsi);
2348
2349 if (!found) {
2350 return -ENXIO;
2351 }
2352 return kvm_irqchip_remove_irqfd_notifier_gsi(s, n, GPOINTER_TO_INT(gsi));
2353 }
2354
2355 void kvm_irqchip_set_qemuirq_gsi(KVMState *s, qemu_irq irq, int gsi)
2356 {
2357 g_hash_table_insert(s->gsimap, irq, GINT_TO_POINTER(gsi));
2358 }
2359
2360 static void kvm_irqchip_create(KVMState *s)
2361 {
2362 int ret;
2363
2364 assert(s->kernel_irqchip_split != ON_OFF_AUTO_AUTO);
2365 if (kvm_check_extension(s, KVM_CAP_IRQCHIP)) {
2366 ;
2367 } else if (kvm_check_extension(s, KVM_CAP_S390_IRQCHIP)) {
2368 ret = kvm_vm_enable_cap(s, KVM_CAP_S390_IRQCHIP, 0);
2369 if (ret < 0) {
2370 fprintf(stderr, "Enable kernel irqchip failed: %s\n", strerror(-ret));
2371 exit(1);
2372 }
2373 } else {
2374 return;
2375 }
2376
2377 if (kvm_check_extension(s, KVM_CAP_IRQFD) <= 0) {
2378 fprintf(stderr, "kvm: irqfd not implemented\n");
2379 exit(1);
2380 }
2381
2382 /* First probe and see if there's an arch-specific hook to create the
2383 * in-kernel irqchip for us */
2384 ret = kvm_arch_irqchip_create(s);
2385 if (ret == 0) {
2386 if (s->kernel_irqchip_split == ON_OFF_AUTO_ON) {
2387 error_report("Split IRQ chip mode not supported.");
2388 exit(1);
2389 } else {
2390 ret = kvm_vm_ioctl(s, KVM_CREATE_IRQCHIP);
2391 }
2392 }
2393 if (ret < 0) {
2394 fprintf(stderr, "Create kernel irqchip failed: %s\n", strerror(-ret));
2395 exit(1);
2396 }
2397
2398 kvm_kernel_irqchip = true;
2399 /* If we have an in-kernel IRQ chip then we must have asynchronous
2400 * interrupt delivery (though the reverse is not necessarily true)
2401 */
2402 kvm_async_interrupts_allowed = true;
2403 kvm_halt_in_kernel_allowed = true;
2404
2405 kvm_init_irq_routing(s);
2406
2407 s->gsimap = g_hash_table_new(g_direct_hash, g_direct_equal);
2408 }
2409
2410 /* Find number of supported CPUs using the recommended
2411 * procedure from the kernel API documentation to cope with
2412 * older kernels that may be missing capabilities.
2413 */
2414 static int kvm_recommended_vcpus(KVMState *s)
2415 {
2416 int ret = kvm_vm_check_extension(s, KVM_CAP_NR_VCPUS);
2417 return (ret) ? ret : 4;
2418 }
2419
2420 static int kvm_max_vcpus(KVMState *s)
2421 {
2422 int ret = kvm_check_extension(s, KVM_CAP_MAX_VCPUS);
2423 return (ret) ? ret : kvm_recommended_vcpus(s);
2424 }
2425
2426 static int kvm_max_vcpu_id(KVMState *s)
2427 {
2428 int ret = kvm_check_extension(s, KVM_CAP_MAX_VCPU_ID);
2429 return (ret) ? ret : kvm_max_vcpus(s);
2430 }
2431
2432 bool kvm_vcpu_id_is_valid(int vcpu_id)
2433 {
2434 KVMState *s = KVM_STATE(current_accel());
2435 return vcpu_id >= 0 && vcpu_id < kvm_max_vcpu_id(s);
2436 }
2437
2438 bool kvm_dirty_ring_enabled(void)
2439 {
2440 return kvm_state && kvm_state->kvm_dirty_ring_size;
2441 }
2442
2443 static void query_stats_cb(StatsResultList **result, StatsTarget target,
2444 strList *names, strList *targets, Error **errp);
2445 static void query_stats_schemas_cb(StatsSchemaList **result, Error **errp);
2446
2447 uint32_t kvm_dirty_ring_size(void)
2448 {
2449 return kvm_state->kvm_dirty_ring_size;
2450 }
2451
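/*
 * Issue KVM_CREATE_VM, retrying on -EINTR: creating the VM can take
 * long enough to be interrupted by a pending signal, in which case the
 * ioctl is simply restarted.
 */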
2452 static int do_kvm_create_vm(MachineState *ms, int type)
2453 {
2454 KVMState *s;
2455 int ret;
2456
2457 s = KVM_STATE(ms->accelerator);
2458
2459 do {
2460 ret = kvm_ioctl(s, KVM_CREATE_VM, type);
2461 } while (ret == -EINTR);
2462
2463 if (ret < 0) {
2464 error_report("ioctl(KVM_CREATE_VM) failed: %s", strerror(-ret));
2465
2466 #ifdef TARGET_S390X
2467 if (ret == -EINVAL) {
2468 error_printf("Host kernel setup problem detected."
2469 " Please verify:\n");
2470 error_printf("- for kernels supporting the"
2471 " switch_amode or user_mode parameters, whether");
2472 error_printf(" user space is running in primary address space\n");
2473 error_printf("- for kernels supporting the vm.allocate_pgste"
2474 " sysctl, whether it is enabled\n");
2475 }
2476 #elif defined(TARGET_PPC)
2477 if (ret == -EINVAL) {
2478 error_printf("PPC KVM module is not loaded. Try modprobe kvm_%s.\n",
2479 (type == 2) ? "pr" : "hv");
2480 }
2481 #endif
2482 }
2483
2484 return ret;
2485 }
2486
2487 static int find_kvm_machine_type(MachineState *ms)
2488 {
2489 MachineClass *mc = MACHINE_GET_CLASS(ms);
2490 int type;
2491
2492 if (object_property_find(OBJECT(current_machine), "kvm-type")) {
2493 g_autofree char *kvm_type;
2494 kvm_type = object_property_get_str(OBJECT(current_machine),
2495 "kvm-type",
2496 &error_abort);
2497 type = mc->kvm_type(ms, kvm_type);
2498 } else if (mc->kvm_type) {
2499 type = mc->kvm_type(ms, NULL);
2500 } else {
2501 type = kvm_arch_get_default_type(ms);
2502 }
2503 return type;
2504 }
2505
2506 static int kvm_setup_dirty_ring(KVMState *s)
2507 {
2508 uint64_t dirty_log_manual_caps;
2509 int ret;
2510
2511 /*
2512 * Enable KVM dirty ring if supported, otherwise fall back to
2513 * dirty logging mode
2514 */
2515 ret = kvm_dirty_ring_init(s);
2516 if (ret < 0) {
2517 return ret;
2518 }
2519
2520 /*
2521 * KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 is not needed when the dirty ring is
2522 * enabled. More importantly, KVM_DIRTY_LOG_INITIALLY_SET assumes no
2523 * page is wr-protected initially, which is against how the kvm dirty
2524 * ring is used - the dirty ring requires all pages to be wr-protected
2525 * from the very beginning. Enabling this feature for the dirty ring causes data corruption.
2526 *
2527 * TODO: Without KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 and kvm clear dirty log,
2528 * we may expect a higher stall time when starting the migration. In the
2529 * future we can enable KVM_CLEAR_DIRTY_LOG to work with dirty ring too:
2530 * instead of clearing dirty bit, it can be a way to explicitly wr-protect
2531 * guest pages.
2532 */
2533 if (!s->kvm_dirty_ring_size) {
2534 dirty_log_manual_caps =
2535 kvm_check_extension(s, KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2);
2536 dirty_log_manual_caps &= (KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE |
2537 KVM_DIRTY_LOG_INITIALLY_SET);
2538 s->manual_dirty_log_protect = dirty_log_manual_caps;
2539 if (dirty_log_manual_caps) {
2540 ret = kvm_vm_enable_cap(s, KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2, 0,
2541 dirty_log_manual_caps);
2542 if (ret) {
2543 warn_report("Trying to enable capability %"PRIu64" of "
2544 "KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 but failed. "
2545 "Falling back to the legacy mode. ",
2546 dirty_log_manual_caps);
2547 s->manual_dirty_log_protect = 0;
2548 }
2549 }
2550 }
2551
2552 return 0;
2553 }
2554
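/*
 * Accelerator init entry point.  Roughly: open /dev/kvm and check the
 * API version, create the VM, validate vCPU limits and required
 * capabilities, set up dirty tracking, call the arch init hook,
 * optionally create the in-kernel irqchip, and finally register the
 * memory listeners.  Returns 0 on success or a negative errno.
 */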
2555 static int kvm_init(MachineState *ms)
2556 {
2557 MachineClass *mc = MACHINE_GET_CLASS(ms);
2558 static const char upgrade_note[] =
2559 "Please upgrade to at least kernel 4.5.\n";
2560 const struct {
2561 const char *name;
2562 int num;
2563 } num_cpus[] = {
2564 { "SMP", ms->smp.cpus },
2565 { "hotpluggable", ms->smp.max_cpus },
2566 { /* end of list */ }
2567 }, *nc = num_cpus;
2568 int soft_vcpus_limit, hard_vcpus_limit;
2569 KVMState *s;
2570 const KVMCapabilityInfo *missing_cap;
2571 int ret;
2572 int type;
2573
2574 qemu_mutex_init(&kml_slots_lock);
2575
2576 s = KVM_STATE(ms->accelerator);
2577
2578 /*
2579 * On systems where the kernel can support different base page
2580 * sizes, host page size may be different from TARGET_PAGE_SIZE,
2581 * even with KVM. TARGET_PAGE_SIZE is assumed to be the minimum
2582 * page size for the system though.
2583 */
2584 assert(TARGET_PAGE_SIZE <= qemu_real_host_page_size());
2585
2586 s->sigmask_len = 8;
2587 accel_blocker_init();
2588
2589 #ifdef TARGET_KVM_HAVE_GUEST_DEBUG
2590 QTAILQ_INIT(&s->kvm_sw_breakpoints);
2591 #endif
2592 QLIST_INIT(&s->kvm_parked_vcpus);
2593 s->fd = qemu_open_old(s->device ?: "/dev/kvm", O_RDWR);
2594 if (s->fd == -1) {
2595 error_report("Could not access KVM kernel module: %m");
2596 ret = -errno;
2597 goto err;
2598 }
2599
2600 ret = kvm_ioctl(s, KVM_GET_API_VERSION, 0);
2601 if (ret < KVM_API_VERSION) {
2602 if (ret >= 0) {
2603 ret = -EINVAL;
2604 }
2605 error_report("kvm version too old");
2606 goto err;
2607 }
2608
2609 if (ret > KVM_API_VERSION) {
2610 ret = -EINVAL;
2611 error_report("kvm version not supported");
2612 goto err;
2613 }
2614
2615 kvm_immediate_exit = kvm_check_extension(s, KVM_CAP_IMMEDIATE_EXIT);
2616 s->nr_slots_max = kvm_check_extension(s, KVM_CAP_NR_MEMSLOTS);
2617
2618 /* If unspecified, use the default value */
2619 if (!s->nr_slots_max) {
2620 s->nr_slots_max = KVM_MEMSLOTS_NR_MAX_DEFAULT;
2621 }
2622
2623 type = find_kvm_machine_type(ms);
2624 if (type < 0) {
2625 ret = -EINVAL;
2626 goto err;
2627 }
2628
2629 ret = do_kvm_create_vm(ms, type);
2630 if (ret < 0) {
2631 goto err;
2632 }
2633
2634 s->vmfd = ret;
2635
2636 s->nr_as = kvm_vm_check_extension(s, KVM_CAP_MULTI_ADDRESS_SPACE);
2637 if (s->nr_as <= 1) {
2638 s->nr_as = 1;
2639 }
2640 s->as = g_new0(struct KVMAs, s->nr_as);
2641
2642 /* check the vcpu limits */
2643 soft_vcpus_limit = kvm_recommended_vcpus(s);
2644 hard_vcpus_limit = kvm_max_vcpus(s);
2645
2646 while (nc->name) {
2647 if (nc->num > soft_vcpus_limit) {
2648 warn_report("Number of %s cpus requested (%d) exceeds "
2649 "the recommended cpus supported by KVM (%d)",
2650 nc->name, nc->num, soft_vcpus_limit);
2651
2652 if (nc->num > hard_vcpus_limit) {
2653 error_report("Number of %s cpus requested (%d) exceeds "
2654 "the maximum cpus supported by KVM (%d)",
2655 nc->name, nc->num, hard_vcpus_limit);
2656 exit(1);
2657 }
2658 }
2659 nc++;
2660 }
2661
2662 missing_cap = kvm_check_extension_list(s, kvm_required_capabilites);
2663 if (!missing_cap) {
2664 missing_cap =
2665 kvm_check_extension_list(s, kvm_arch_required_capabilities);
2666 }
2667 if (missing_cap) {
2668 ret = -EINVAL;
2669 error_report("kvm does not support %s", missing_cap->name);
2670 error_printf("%s", upgrade_note);
2671 goto err;
2672 }
2673
2674 s->coalesced_mmio = kvm_check_extension(s, KVM_CAP_COALESCED_MMIO);
2675 s->coalesced_pio = s->coalesced_mmio &&
2676 kvm_check_extension(s, KVM_CAP_COALESCED_PIO);
2677
2678 ret = kvm_setup_dirty_ring(s);
2679 if (ret < 0) {
2680 goto err;
2681 }
2682
2683 #ifdef KVM_CAP_VCPU_EVENTS
2684 s->vcpu_events = kvm_check_extension(s, KVM_CAP_VCPU_EVENTS);
2685 #endif
2686 s->max_nested_state_len = kvm_check_extension(s, KVM_CAP_NESTED_STATE);
2687
2688 s->irq_set_ioctl = KVM_IRQ_LINE;
2689 if (kvm_check_extension(s, KVM_CAP_IRQ_INJECT_STATUS)) {
2690 s->irq_set_ioctl = KVM_IRQ_LINE_STATUS;
2691 }
2692
2693 kvm_readonly_mem_allowed =
2694 (kvm_vm_check_extension(s, KVM_CAP_READONLY_MEM) > 0);
2695
2696 kvm_resamplefds_allowed =
2697 (kvm_check_extension(s, KVM_CAP_IRQFD_RESAMPLE) > 0);
2698
2699 kvm_vm_attributes_allowed =
2700 (kvm_check_extension(s, KVM_CAP_VM_ATTRIBUTES) > 0);
2701
2702 #ifdef TARGET_KVM_HAVE_GUEST_DEBUG
2703 kvm_has_guest_debug =
2704 (kvm_check_extension(s, KVM_CAP_SET_GUEST_DEBUG) > 0);
2705 #endif
2706
2707 kvm_sstep_flags = 0;
2708 if (kvm_has_guest_debug) {
2709 kvm_sstep_flags = SSTEP_ENABLE;
2710
2711 #if defined TARGET_KVM_HAVE_GUEST_DEBUG
2712 int guest_debug_flags =
2713 kvm_check_extension(s, KVM_CAP_SET_GUEST_DEBUG2);
2714
2715 if (guest_debug_flags & KVM_GUESTDBG_BLOCKIRQ) {
2716 kvm_sstep_flags |= SSTEP_NOIRQ;
2717 }
2718 #endif
2719 }
2720
2721 kvm_state = s;
2722
2723 ret = kvm_arch_init(ms, s);
2724 if (ret < 0) {
2725 goto err;
2726 }
2727
2728 kvm_supported_memory_attributes = kvm_vm_check_extension(s, KVM_CAP_MEMORY_ATTRIBUTES);
2729 kvm_guest_memfd_supported =
2730 kvm_check_extension(s, KVM_CAP_GUEST_MEMFD) &&
2731 kvm_check_extension(s, KVM_CAP_USER_MEMORY2) &&
2732 (kvm_supported_memory_attributes & KVM_MEMORY_ATTRIBUTE_PRIVATE);
2733
2734 if (s->kernel_irqchip_split == ON_OFF_AUTO_AUTO) {
2735 s->kernel_irqchip_split = mc->default_kernel_irqchip_split ? ON_OFF_AUTO_ON : ON_OFF_AUTO_OFF;
2736 }
2737
2738 qemu_register_reset(kvm_unpoison_all, NULL);
2739
2740 if (s->kernel_irqchip_allowed) {
2741 kvm_irqchip_create(s);
2742 }
2743
2744 s->memory_listener.listener.eventfd_add = kvm_mem_ioeventfd_add;
2745 s->memory_listener.listener.eventfd_del = kvm_mem_ioeventfd_del;
2746 s->memory_listener.listener.coalesced_io_add = kvm_coalesce_mmio_region;
2747 s->memory_listener.listener.coalesced_io_del = kvm_uncoalesce_mmio_region;
2748
2749 kvm_memory_listener_register(s, &s->memory_listener,
2750 &address_space_memory, 0, "kvm-memory");
2751 memory_listener_register(&kvm_io_listener,
2752 &address_space_io);
2753
2754 s->sync_mmu = !!kvm_vm_check_extension(kvm_state, KVM_CAP_SYNC_MMU);
2755 if (!s->sync_mmu) {
2756 ret = ram_block_discard_disable(true);
2757 assert(!ret);
2758 }
2759
2760 if (s->kvm_dirty_ring_size) {
2761 kvm_dirty_ring_reaper_init(s);
2762 }
2763
2764 if (kvm_check_extension(kvm_state, KVM_CAP_BINARY_STATS_FD)) {
2765 add_stats_callbacks(STATS_PROVIDER_KVM, query_stats_cb,
2766 query_stats_schemas_cb);
2767 }
2768
2769 return 0;
2770
2771 err:
2772 assert(ret < 0);
2773 if (s->vmfd >= 0) {
2774 close(s->vmfd);
2775 }
2776 if (s->fd != -1) {
2777 close(s->fd);
2778 }
2779 g_free(s->as);
2780 g_free(s->memory_listener.slots);
2781
2782 return ret;
2783 }
2784
2785 void kvm_set_sigmask_len(KVMState *s, unsigned int sigmask_len)
2786 {
2787 s->sigmask_len = sigmask_len;
2788 }
2789
2790 static void kvm_handle_io(uint16_t port, MemTxAttrs attrs, void *data, int direction,
2791 int size, uint32_t count)
2792 {
2793 int i;
2794 uint8_t *ptr = data;
2795
2796 for (i = 0; i < count; i++) {
2797 address_space_rw(&address_space_io, port, attrs,
2798 ptr, size,
2799 direction == KVM_EXIT_IO_OUT);
2800 ptr += size;
2801 }
2802 }
2803
2804 static int kvm_handle_internal_error(CPUState *cpu, struct kvm_run *run)
2805 {
2806 int i;
2807
2808 fprintf(stderr, "KVM internal error. Suberror: %d\n",
2809 run->internal.suberror);
2810
2811 for (i = 0; i < run->internal.ndata; ++i) {
2812 fprintf(stderr, "extra data[%d]: 0x%016"PRIx64"\n",
2813 i, (uint64_t)run->internal.data[i]);
2814 }
2815 if (run->internal.suberror == KVM_INTERNAL_ERROR_EMULATION) {
2816 fprintf(stderr, "emulation failure\n");
2817 if (!kvm_arch_stop_on_emulation_error(cpu)) {
2818 cpu_dump_state(cpu, stderr, CPU_DUMP_CODE);
2819 return EXCP_INTERRUPT;
2820 }
2821 }
2822 /* FIXME: Should trigger a qmp message to let management know
2823 * something went wrong.
2824 */
2825 return -1;
2826 }
2827
2828 void kvm_flush_coalesced_mmio_buffer(void)
2829 {
2830 KVMState *s = kvm_state;
2831
2832 if (!s || s->coalesced_flush_in_progress) {
2833 return;
2834 }
2835
2836 s->coalesced_flush_in_progress = true;
2837
2838 if (s->coalesced_mmio_ring) {
2839 struct kvm_coalesced_mmio_ring *ring = s->coalesced_mmio_ring;
2840 while (ring->first != ring->last) {
2841 struct kvm_coalesced_mmio *ent;
2842
2843 ent = &ring->coalesced_mmio[ring->first];
2844
2845 if (ent->pio == 1) {
2846 address_space_write(&address_space_io, ent->phys_addr,
2847 MEMTXATTRS_UNSPECIFIED, ent->data,
2848 ent->len);
2849 } else {
2850 cpu_physical_memory_write(ent->phys_addr, ent->data, ent->len);
2851 }
2852 smp_wmb();
2853 ring->first = (ring->first + 1) % KVM_COALESCED_MMIO_MAX;
2854 }
2855 }
2856
2857 s->coalesced_flush_in_progress = false;
2858 }
2859
2860 static void do_kvm_cpu_synchronize_state(CPUState *cpu, run_on_cpu_data arg)
2861 {
2862 if (!cpu->vcpu_dirty && !kvm_state->guest_state_protected) {
2863 Error *err = NULL;
2864 int ret = kvm_arch_get_registers(cpu, &err);
2865 if (ret) {
2866 if (err) {
2867 error_reportf_err(err, "Failed to synchronize CPU state: ");
2868 } else {
2869 error_report("Failed to get registers: %s", strerror(-ret));
2870 }
2871
2872 cpu_dump_state(cpu, stderr, CPU_DUMP_CODE);
2873 vm_stop(RUN_STATE_INTERNAL_ERROR);
2874 }
2875
2876 cpu->vcpu_dirty = true;
2877 }
2878 }
2879
2880 void kvm_cpu_synchronize_state(CPUState *cpu)
2881 {
2882 if (!cpu->vcpu_dirty && !kvm_state->guest_state_protected) {
2883 run_on_cpu(cpu, do_kvm_cpu_synchronize_state, RUN_ON_CPU_NULL);
2884 }
2885 }
2886
2887 static void do_kvm_cpu_synchronize_post_reset(CPUState *cpu, run_on_cpu_data arg)
2888 {
2889 Error *err = NULL;
2890 int ret = kvm_arch_put_registers(cpu, KVM_PUT_RESET_STATE, &err);
2891 if (ret) {
2892 if (err) {
2893 error_reportf_err(err, "Restoring resisters after reset: ");
2894 } else {
2895 error_report("Failed to put registers after reset: %s",
2896 strerror(-ret));
2897 }
2898 cpu_dump_state(cpu, stderr, CPU_DUMP_CODE);
2899 vm_stop(RUN_STATE_INTERNAL_ERROR);
2900 }
2901
2902 cpu->vcpu_dirty = false;
2903 }
2904
2905 void kvm_cpu_synchronize_post_reset(CPUState *cpu)
2906 {
2907 run_on_cpu(cpu, do_kvm_cpu_synchronize_post_reset, RUN_ON_CPU_NULL);
2908
2909 if (cpu == first_cpu) {
2910 kvm_reset_parked_vcpus(kvm_state);
2911 }
2912 }
2913
2914 static void do_kvm_cpu_synchronize_post_init(CPUState *cpu, run_on_cpu_data arg)
2915 {
2916 Error *err = NULL;
2917 int ret = kvm_arch_put_registers(cpu, KVM_PUT_FULL_STATE, &err);
2918 if (ret) {
2919 if (err) {
2920 error_reportf_err(err, "Putting registers after init: ");
2921 } else {
2922 error_report("Failed to put registers after init: %s",
2923 strerror(-ret));
2924 }
2925 exit(1);
2926 }
2927
2928 cpu->vcpu_dirty = false;
2929 }
2930
2931 void kvm_cpu_synchronize_post_init(CPUState *cpu)
2932 {
2933 if (!kvm_state->guest_state_protected) {
2934 /*
2935 * This runs before the machine_init_done notifiers, and is the last
2936 * opportunity to synchronize the state of confidential guests.
2937 */
2938 run_on_cpu(cpu, do_kvm_cpu_synchronize_post_init, RUN_ON_CPU_NULL);
2939 }
2940 }
2941
2942 static void do_kvm_cpu_synchronize_pre_loadvm(CPUState *cpu, run_on_cpu_data arg)
2943 {
2944 cpu->vcpu_dirty = true;
2945 }
2946
2947 void kvm_cpu_synchronize_pre_loadvm(CPUState *cpu)
2948 {
2949 run_on_cpu(cpu, do_kvm_cpu_synchronize_pre_loadvm, RUN_ON_CPU_NULL);
2950 }
2951
2952 #ifdef KVM_HAVE_MCE_INJECTION
2953 static __thread void *pending_sigbus_addr;
2954 static __thread int pending_sigbus_code;
2955 static __thread bool have_sigbus_pending;
2956 #endif
2957
2958 static void kvm_cpu_kick(CPUState *cpu)
2959 {
2960 qatomic_set(&cpu->kvm_run->immediate_exit, 1);
2961 }
2962
2963 static void kvm_cpu_kick_self(void)
2964 {
2965 if (kvm_immediate_exit) {
2966 kvm_cpu_kick(current_cpu);
2967 } else {
2968 qemu_cpu_kick_self();
2969 }
2970 }
2971
2972 static void kvm_eat_signals(CPUState *cpu)
2973 {
2974 struct timespec ts = { 0, 0 };
2975 siginfo_t siginfo;
2976 sigset_t waitset;
2977 sigset_t chkset;
2978 int r;
2979
2980 if (kvm_immediate_exit) {
2981 qatomic_set(&cpu->kvm_run->immediate_exit, 0);
2982 /* Write kvm_run->immediate_exit before the cpu->exit_request
2983 * write in kvm_cpu_exec.
2984 */
2985 smp_wmb();
2986 return;
2987 }
2988
2989 sigemptyset(&waitset);
2990 sigaddset(&waitset, SIG_IPI);
2991
2992 do {
2993 r = sigtimedwait(&waitset, &siginfo, &ts);
2994 if (r == -1 && !(errno == EAGAIN || errno == EINTR)) {
2995 perror("sigtimedwait");
2996 exit(1);
2997 }
2998
2999 r = sigpending(&chkset);
3000 if (r == -1) {
3001 perror("sigpending");
3002 exit(1);
3003 }
3004 } while (sigismember(&chkset, SIG_IPI));
3005 }
3006
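/*
 * Flip a guest-physical range between private and shared for
 * confidential guests backed by guest_memfd: update the KVM memory
 * attributes first, then discard the now-unused backing pages so that
 * the private and shared copies never consume memory at the same time.
 */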
3007 int kvm_convert_memory(hwaddr start, hwaddr size, bool to_private)
3008 {
3009 MemoryRegionSection section;
3010 ram_addr_t offset;
3011 MemoryRegion *mr;
3012 RAMBlock *rb;
3013 void *addr;
3014 int ret = -EINVAL;
3015
3016 trace_kvm_convert_memory(start, size, to_private ? "shared_to_private" : "private_to_shared");
3017
3018 if (!QEMU_PTR_IS_ALIGNED(start, qemu_real_host_page_size()) ||
3019 !QEMU_PTR_IS_ALIGNED(size, qemu_real_host_page_size())) {
3020 return ret;
3021 }
3022
3023 if (!size) {
3024 return ret;
3025 }
3026
3027 section = memory_region_find(get_system_memory(), start, size);
3028 mr = section.mr;
3029 if (!mr) {
3030 /*
3031 * Ignore converting non-assigned region to shared.
3032 *
3033 * TDX requires the vMMIO region to be shared to inject #VE to the guest.
3034 * OVMF conservatively issues MapGPA(shared) on the 32bit PCI MMIO region,
3035 * and on the vIO-APIC 0xFEC00000 4K page.
3036 * OVMF assigns the 32bit PCI MMIO region to
3037 * [top of low memory: typically 2GB=0x80000000, 0xFC000000)
3038 */
3039 if (!to_private) {
3040 return 0;
3041 }
3042 return ret;
3043 }
3044
3045 if (!memory_region_has_guest_memfd(mr)) {
3046 /*
3047 * Because the vMMIO region must be shared, the guest TD may convert the
3048 * vMMIO region to shared explicitly. Don't complain about such a case.
3049 * See memory_region_type() for checking if the region is an MMIO region.
3050 */
3051 if (!to_private &&
3052 !memory_region_is_ram(mr) &&
3053 !memory_region_is_ram_device(mr) &&
3054 !memory_region_is_rom(mr) &&
3055 !memory_region_is_romd(mr)) {
3056 ret = 0;
3057 } else {
3058 error_report("Convert non guest_memfd backed memory region "
3059 "(0x%"HWADDR_PRIx" ,+ 0x%"HWADDR_PRIx") to %s",
3060 start, size, to_private ? "private" : "shared");
3061 }
3062 goto out_unref;
3063 }
3064
3065 if (to_private) {
3066 ret = kvm_set_memory_attributes_private(start, size);
3067 } else {
3068 ret = kvm_set_memory_attributes_shared(start, size);
3069 }
3070 if (ret) {
3071 goto out_unref;
3072 }
3073
3074 addr = memory_region_get_ram_ptr(mr) + section.offset_within_region;
3075 rb = qemu_ram_block_from_host(addr, false, &offset);
3076
3077 if (to_private) {
3078 if (rb->page_size != qemu_real_host_page_size()) {
3079 /*
3080 * shared memory is backed by hugetlb, which is supposed to be
3081 * pre-allocated and doesn't need to be discarded
3082 */
3083 goto out_unref;
3084 }
3085 ret = ram_block_discard_range(rb, offset, size);
3086 } else {
3087 ret = ram_block_discard_guest_memfd_range(rb, offset, size);
3088 }
3089
3090 out_unref:
3091 memory_region_unref(mr);
3092 return ret;
3093 }
3094
3095 int kvm_cpu_exec(CPUState *cpu)
3096 {
3097 struct kvm_run *run = cpu->kvm_run;
3098 int ret, run_ret;
3099
3100 trace_kvm_cpu_exec();
3101
3102 if (kvm_arch_process_async_events(cpu)) {
3103 qatomic_set(&cpu->exit_request, 0);
3104 return EXCP_HLT;
3105 }
3106
3107 bql_unlock();
3108 cpu_exec_start(cpu);
3109
3110 do {
3111 MemTxAttrs attrs;
3112
3113 if (cpu->vcpu_dirty) {
3114 Error *err = NULL;
3115 ret = kvm_arch_put_registers(cpu, KVM_PUT_RUNTIME_STATE, &err);
3116 if (ret) {
3117 if (err) {
3118 error_reportf_err(err, "Putting registers after init: ");
3119 } else {
3120 error_report("Failed to put registers after init: %s",
3121 strerror(-ret));
3122 }
3123 ret = -1;
3124 break;
3125 }
3126
3127 cpu->vcpu_dirty = false;
3128 }
3129
3130 kvm_arch_pre_run(cpu, run);
3131 if (qatomic_read(&cpu->exit_request)) {
3132 trace_kvm_interrupt_exit_request();
3133 /*
3134 * KVM requires us to reenter the kernel after IO exits to complete
3135 * instruction emulation. This self-signal will ensure that we
3136 * leave ASAP again.
3137 */
3138 kvm_cpu_kick_self();
3139 }
3140
3141 /* Read cpu->exit_request before KVM_RUN reads run->immediate_exit.
3142 * Matching barrier in kvm_eat_signals.
3143 */
3144 smp_rmb();
3145
3146 run_ret = kvm_vcpu_ioctl(cpu, KVM_RUN, 0);
3147
3148 attrs = kvm_arch_post_run(cpu, run);
3149
3150 #ifdef KVM_HAVE_MCE_INJECTION
3151 if (unlikely(have_sigbus_pending)) {
3152 bql_lock();
3153 kvm_arch_on_sigbus_vcpu(cpu, pending_sigbus_code,
3154 pending_sigbus_addr);
3155 have_sigbus_pending = false;
3156 bql_unlock();
3157 }
3158 #endif
3159
3160 if (run_ret < 0) {
3161 if (run_ret == -EINTR || run_ret == -EAGAIN) {
3162 trace_kvm_io_window_exit();
3163 kvm_eat_signals(cpu);
3164 ret = EXCP_INTERRUPT;
3165 break;
3166 }
3167 if (!(run_ret == -EFAULT && run->exit_reason == KVM_EXIT_MEMORY_FAULT)) {
3168 fprintf(stderr, "error: kvm run failed %s\n",
3169 strerror(-run_ret));
3170 #ifdef TARGET_PPC
3171 if (run_ret == -EBUSY) {
3172 fprintf(stderr,
3173 "This is probably because your SMT is enabled.\n"
3174 "VCPU can only run on primary threads with all "
3175 "secondary threads offline.\n");
3176 }
3177 #endif
3178 ret = -1;
3179 break;
3180 }
3181 }
3182
3183 trace_kvm_run_exit(cpu->cpu_index, run->exit_reason);
3184 switch (run->exit_reason) {
3185 case KVM_EXIT_IO:
3186 /* Called outside BQL */
3187 kvm_handle_io(run->io.port, attrs,
3188 (uint8_t *)run + run->io.data_offset,
3189 run->io.direction,
3190 run->io.size,
3191 run->io.count);
3192 ret = 0;
3193 break;
3194 case KVM_EXIT_MMIO:
3195 /* Called outside BQL */
3196 address_space_rw(&address_space_memory,
3197 run->mmio.phys_addr, attrs,
3198 run->mmio.data,
3199 run->mmio.len,
3200 run->mmio.is_write);
3201 ret = 0;
3202 break;
3203 case KVM_EXIT_IRQ_WINDOW_OPEN:
3204 ret = EXCP_INTERRUPT;
3205 break;
3206 case KVM_EXIT_SHUTDOWN:
3207 qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
3208 ret = EXCP_INTERRUPT;
3209 break;
3210 case KVM_EXIT_UNKNOWN:
3211 fprintf(stderr, "KVM: unknown exit, hardware reason %" PRIx64 "\n",
3212 (uint64_t)run->hw.hardware_exit_reason);
3213 ret = -1;
3214 break;
3215 case KVM_EXIT_INTERNAL_ERROR:
3216 ret = kvm_handle_internal_error(cpu, run);
3217 break;
3218 case KVM_EXIT_DIRTY_RING_FULL:
3219 /*
3220 * We shouldn't continue if the dirty ring of this vcpu is
3221 * still full. Got kicked by KVM_RESET_DIRTY_RINGS.
3222 */
3223 trace_kvm_dirty_ring_full(cpu->cpu_index);
3224 bql_lock();
3225 /*
3226 * We throttle the vCPU by making it sleep once it exits the
3227 * kernel due to a full dirty ring. In the dirtylimit scenario,
3228 * reaping all vCPUs after a single vCPU's dirty ring gets full
3229 * would skip that sleep, so reap only the vCPU whose ring is full.
3230 */
3231 if (dirtylimit_in_service()) {
3232 kvm_dirty_ring_reap(kvm_state, cpu);
3233 } else {
3234 kvm_dirty_ring_reap(kvm_state, NULL);
3235 }
3236 bql_unlock();
3237 dirtylimit_vcpu_execute(cpu);
3238 ret = 0;
3239 break;
3240 case KVM_EXIT_SYSTEM_EVENT:
3241 trace_kvm_run_exit_system_event(cpu->cpu_index, run->system_event.type);
3242 switch (run->system_event.type) {
3243 case KVM_SYSTEM_EVENT_SHUTDOWN:
3244 qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN);
3245 ret = EXCP_INTERRUPT;
3246 break;
3247 case KVM_SYSTEM_EVENT_RESET:
3248 qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
3249 ret = EXCP_INTERRUPT;
3250 break;
3251 case KVM_SYSTEM_EVENT_CRASH:
3252 kvm_cpu_synchronize_state(cpu);
3253 bql_lock();
3254 qemu_system_guest_panicked(cpu_get_crash_info(cpu));
3255 bql_unlock();
3256 ret = 0;
3257 break;
3258 default:
3259 ret = kvm_arch_handle_exit(cpu, run);
3260 break;
3261 }
3262 break;
3263 case KVM_EXIT_MEMORY_FAULT:
3264 trace_kvm_memory_fault(run->memory_fault.gpa,
3265 run->memory_fault.size,
3266 run->memory_fault.flags);
3267 if (run->memory_fault.flags & ~KVM_MEMORY_EXIT_FLAG_PRIVATE) {
3268 error_report("KVM_EXIT_MEMORY_FAULT: Unknown flag 0x%" PRIx64,
3269 (uint64_t)run->memory_fault.flags);
3270 ret = -1;
3271 break;
3272 }
3273 ret = kvm_convert_memory(run->memory_fault.gpa, run->memory_fault.size,
3274 run->memory_fault.flags & KVM_MEMORY_EXIT_FLAG_PRIVATE);
3275 break;
3276 default:
3277 ret = kvm_arch_handle_exit(cpu, run);
3278 break;
3279 }
3280 } while (ret == 0);
3281
3282 cpu_exec_end(cpu);
3283 bql_lock();
3284
3285 if (ret < 0) {
3286 cpu_dump_state(cpu, stderr, CPU_DUMP_CODE);
3287 vm_stop(RUN_STATE_INTERNAL_ERROR);
3288 }
3289
3290 qatomic_set(&cpu->exit_request, 0);
3291 return ret;
3292 }
3293
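/*
 * Thin ioctl(2) wrappers for the /dev/kvm fd, the VM fd, a vCPU fd and
 * a KVM device fd.  Each returns the raw ioctl result on success and
 * -errno on failure, so callers can test "ret < 0" directly, e.g.:
 *
 *     ret = kvm_vm_ioctl(s, KVM_CHECK_EXTENSION, KVM_CAP_NR_MEMSLOTS);
 */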
3294 int kvm_ioctl(KVMState *s, unsigned long type, ...)
3295 {
3296 int ret;
3297 void *arg;
3298 va_list ap;
3299
3300 va_start(ap, type);
3301 arg = va_arg(ap, void *);
3302 va_end(ap);
3303
3304 trace_kvm_ioctl(type, arg);
3305 ret = ioctl(s->fd, type, arg);
3306 if (ret == -1) {
3307 ret = -errno;
3308 }
3309 return ret;
3310 }
3311
3312 int kvm_vm_ioctl(KVMState *s, unsigned long type, ...)
3313 {
3314 int ret;
3315 void *arg;
3316 va_list ap;
3317
3318 va_start(ap, type);
3319 arg = va_arg(ap, void *);
3320 va_end(ap);
3321
3322 trace_kvm_vm_ioctl(type, arg);
3323 accel_ioctl_begin();
3324 ret = ioctl(s->vmfd, type, arg);
3325 accel_ioctl_end();
3326 if (ret == -1) {
3327 ret = -errno;
3328 }
3329 return ret;
3330 }
3331
3332 int kvm_vcpu_ioctl(CPUState *cpu, unsigned long type, ...)
3333 {
3334 int ret;
3335 void *arg;
3336 va_list ap;
3337
3338 va_start(ap, type);
3339 arg = va_arg(ap, void *);
3340 va_end(ap);
3341
3342 trace_kvm_vcpu_ioctl(cpu->cpu_index, type, arg);
3343 accel_cpu_ioctl_begin(cpu);
3344 ret = ioctl(cpu->kvm_fd, type, arg);
3345 accel_cpu_ioctl_end(cpu);
3346 if (ret == -1) {
3347 ret = -errno;
3348 }
3349 return ret;
3350 }
3351
3352 int kvm_device_ioctl(int fd, unsigned long type, ...)
3353 {
3354 int ret;
3355 void *arg;
3356 va_list ap;
3357
3358 va_start(ap, type);
3359 arg = va_arg(ap, void *);
3360 va_end(ap);
3361
3362 trace_kvm_device_ioctl(fd, type, arg);
3363 accel_ioctl_begin();
3364 ret = ioctl(fd, type, arg);
3365 accel_ioctl_end();
3366 if (ret == -1) {
3367 ret = -errno;
3368 }
3369 return ret;
3370 }
3371
3372 int kvm_vm_check_attr(KVMState *s, uint32_t group, uint64_t attr)
3373 {
3374 int ret;
3375 struct kvm_device_attr attribute = {
3376 .group = group,
3377 .attr = attr,
3378 };
3379
3380 if (!kvm_vm_attributes_allowed) {
3381 return 0;
3382 }
3383
3384 ret = kvm_vm_ioctl(s, KVM_HAS_DEVICE_ATTR, &attribute);
3385 /* kvm returns 0 on success for HAS_DEVICE_ATTR */
3386 return ret ? 0 : 1;
3387 }
3388
3389 int kvm_device_check_attr(int dev_fd, uint32_t group, uint64_t attr)
3390 {
3391 struct kvm_device_attr attribute = {
3392 .group = group,
3393 .attr = attr,
3394 .flags = 0,
3395 };
3396
3397 return kvm_device_ioctl(dev_fd, KVM_HAS_DEVICE_ATTR, &attribute) ? 0 : 1;
3398 }
3399
3400 int kvm_device_access(int fd, int group, uint64_t attr,
3401 void *val, bool write, Error **errp)
3402 {
3403 struct kvm_device_attr kvmattr;
3404 int err;
3405
3406 kvmattr.flags = 0;
3407 kvmattr.group = group;
3408 kvmattr.attr = attr;
3409 kvmattr.addr = (uintptr_t)val;
3410
3411 err = kvm_device_ioctl(fd,
3412 write ? KVM_SET_DEVICE_ATTR : KVM_GET_DEVICE_ATTR,
3413 &kvmattr);
3414 if (err < 0) {
3415 error_setg_errno(errp, -err,
3416 "KVM_%s_DEVICE_ATTR failed: Group %d "
3417 "attr 0x%016" PRIx64,
3418 write ? "SET" : "GET", group, attr);
3419 }
3420 return err;
3421 }
3422
3423 bool kvm_has_sync_mmu(void)
3424 {
3425 return kvm_state->sync_mmu;
3426 }
3427
3428 int kvm_has_vcpu_events(void)
3429 {
3430 return kvm_state->vcpu_events;
3431 }
3432
3433 int kvm_max_nested_state_length(void)
3434 {
3435 return kvm_state->max_nested_state_len;
3436 }
3437
3438 int kvm_has_gsi_routing(void)
3439 {
3440 #ifdef KVM_CAP_IRQ_ROUTING
3441 return kvm_check_extension(kvm_state, KVM_CAP_IRQ_ROUTING);
3442 #else
3443 return false;
3444 #endif
3445 }
3446
3447 bool kvm_arm_supports_user_irq(void)
3448 {
3449 return kvm_check_extension(kvm_state, KVM_CAP_ARM_USER_IRQ);
3450 }
3451
3452 #ifdef TARGET_KVM_HAVE_GUEST_DEBUG
3453 struct kvm_sw_breakpoint *kvm_find_sw_breakpoint(CPUState *cpu, vaddr pc)
3454 {
3455 struct kvm_sw_breakpoint *bp;
3456
3457 QTAILQ_FOREACH(bp, &cpu->kvm_state->kvm_sw_breakpoints, entry) {
3458 if (bp->pc == pc) {
3459 return bp;
3460 }
3461 }
3462 return NULL;
3463 }
3464
3465 int kvm_sw_breakpoints_active(CPUState *cpu)
3466 {
3467 return !QTAILQ_EMPTY(&cpu->kvm_state->kvm_sw_breakpoints);
3468 }
3469
3470 struct kvm_set_guest_debug_data {
3471 struct kvm_guest_debug dbg;
3472 int err;
3473 };
3474
3475 static void kvm_invoke_set_guest_debug(CPUState *cpu, run_on_cpu_data data)
3476 {
3477 struct kvm_set_guest_debug_data *dbg_data =
3478 (struct kvm_set_guest_debug_data *) data.host_ptr;
3479
3480 dbg_data->err = kvm_vcpu_ioctl(cpu, KVM_SET_GUEST_DEBUG,
3481 &dbg_data->dbg);
3482 }
3483
3484 int kvm_update_guest_debug(CPUState *cpu, unsigned long reinject_trap)
3485 {
3486 struct kvm_set_guest_debug_data data;
3487
3488 data.dbg.control = reinject_trap;
3489
3490 if (cpu->singlestep_enabled) {
3491 data.dbg.control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP;
3492
3493 if (cpu->singlestep_enabled & SSTEP_NOIRQ) {
3494 data.dbg.control |= KVM_GUESTDBG_BLOCKIRQ;
3495 }
3496 }
3497 kvm_arch_update_guest_debug(cpu, &data.dbg);
3498
3499 run_on_cpu(cpu, kvm_invoke_set_guest_debug,
3500 RUN_ON_CPU_HOST_PTR(&data));
3501 return data.err;
3502 }
3503
3504 bool kvm_supports_guest_debug(void)
3505 {
3506 /* probed during kvm_init() */
3507 return kvm_has_guest_debug;
3508 }
3509
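/*
 * GDB breakpoint insertion.  Software breakpoints are reference-counted
 * per address in the kvm_sw_breakpoints list and planted by the arch
 * code; hardware breakpoints go straight to the arch hooks.  Either
 * way, the new debug state is pushed to every vCPU afterwards.
 */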
3510 int kvm_insert_breakpoint(CPUState *cpu, int type, vaddr addr, vaddr len)
3511 {
3512 struct kvm_sw_breakpoint *bp;
3513 int err;
3514
3515 if (type == GDB_BREAKPOINT_SW) {
3516 bp = kvm_find_sw_breakpoint(cpu, addr);
3517 if (bp) {
3518 bp->use_count++;
3519 return 0;
3520 }
3521
3522 bp = g_new(struct kvm_sw_breakpoint, 1);
3523 bp->pc = addr;
3524 bp->use_count = 1;
3525 err = kvm_arch_insert_sw_breakpoint(cpu, bp);
3526 if (err) {
3527 g_free(bp);
3528 return err;
3529 }
3530
3531 QTAILQ_INSERT_HEAD(&cpu->kvm_state->kvm_sw_breakpoints, bp, entry);
3532 } else {
3533 err = kvm_arch_insert_hw_breakpoint(addr, len, type);
3534 if (err) {
3535 return err;
3536 }
3537 }
3538
3539 CPU_FOREACH(cpu) {
3540 err = kvm_update_guest_debug(cpu, 0);
3541 if (err) {
3542 return err;
3543 }
3544 }
3545 return 0;
3546 }
3547
3548 int kvm_remove_breakpoint(CPUState *cpu, int type, vaddr addr, vaddr len)
3549 {
3550 struct kvm_sw_breakpoint *bp;
3551 int err;
3552
3553 if (type == GDB_BREAKPOINT_SW) {
3554 bp = kvm_find_sw_breakpoint(cpu, addr);
3555 if (!bp) {
3556 return -ENOENT;
3557 }
3558
3559 if (bp->use_count > 1) {
3560 bp->use_count--;
3561 return 0;
3562 }
3563
3564 err = kvm_arch_remove_sw_breakpoint(cpu, bp);
3565 if (err) {
3566 return err;
3567 }
3568
3569 QTAILQ_REMOVE(&cpu->kvm_state->kvm_sw_breakpoints, bp, entry);
3570 g_free(bp);
3571 } else {
3572 err = kvm_arch_remove_hw_breakpoint(addr, len, type);
3573 if (err) {
3574 return err;
3575 }
3576 }
3577
3578 CPU_FOREACH(cpu) {
3579 err = kvm_update_guest_debug(cpu, 0);
3580 if (err) {
3581 return err;
3582 }
3583 }
3584 return 0;
3585 }
3586
3587 void kvm_remove_all_breakpoints(CPUState *cpu)
3588 {
3589 struct kvm_sw_breakpoint *bp, *next;
3590 KVMState *s = cpu->kvm_state;
3591 CPUState *tmpcpu;
3592
3593 QTAILQ_FOREACH_SAFE(bp, &s->kvm_sw_breakpoints, entry, next) {
3594 if (kvm_arch_remove_sw_breakpoint(cpu, bp) != 0) {
3595 /* Try harder to find a CPU that currently sees the breakpoint. */
3596 CPU_FOREACH(tmpcpu) {
3597 if (kvm_arch_remove_sw_breakpoint(tmpcpu, bp) == 0) {
3598 break;
3599 }
3600 }
3601 }
3602 QTAILQ_REMOVE(&s->kvm_sw_breakpoints, bp, entry);
3603 g_free(bp);
3604 }
3605 kvm_arch_remove_all_hw_breakpoints();
3606
3607 CPU_FOREACH(cpu) {
3608 kvm_update_guest_debug(cpu, 0);
3609 }
3610 }
3611
3612 #endif /* TARGET_KVM_HAVE_GUEST_DEBUG */
3613
3614 static int kvm_set_signal_mask(CPUState *cpu, const sigset_t *sigset)
3615 {
3616 KVMState *s = kvm_state;
3617 struct kvm_signal_mask *sigmask;
3618 int r;
3619
3620 sigmask = g_malloc(sizeof(*sigmask) + sizeof(*sigset));
3621
3622 sigmask->len = s->sigmask_len;
3623 memcpy(sigmask->sigset, sigset, sizeof(*sigset));
3624 r = kvm_vcpu_ioctl(cpu, KVM_SET_SIGNAL_MASK, sigmask);
3625 g_free(sigmask);
3626
3627 return r;
3628 }
3629
3630 static void kvm_ipi_signal(int sig)
3631 {
3632 if (current_cpu) {
3633 assert(kvm_immediate_exit);
3634 kvm_cpu_kick(current_cpu);
3635 }
3636 }
3637
3638 void kvm_init_cpu_signals(CPUState *cpu)
3639 {
3640 int r;
3641 sigset_t set;
3642 struct sigaction sigact;
3643
3644 memset(&sigact, 0, sizeof(sigact));
3645 sigact.sa_handler = kvm_ipi_signal;
3646 sigaction(SIG_IPI, &sigact, NULL);
3647
3648 pthread_sigmask(SIG_BLOCK, NULL, &set);
3649 #if defined KVM_HAVE_MCE_INJECTION
3650 sigdelset(&set, SIGBUS);
3651 pthread_sigmask(SIG_SETMASK, &set, NULL);
3652 #endif
3653 sigdelset(&set, SIG_IPI);
3654 if (kvm_immediate_exit) {
3655 r = pthread_sigmask(SIG_SETMASK, &set, NULL);
3656 } else {
3657 r = kvm_set_signal_mask(cpu, &set);
3658 }
3659 if (r) {
3660 fprintf(stderr, "kvm_set_signal_mask: %s\n", strerror(-r));
3661 exit(1);
3662 }
3663 }
3664
3665 /* Called asynchronously in VCPU thread. */
3666 int kvm_on_sigbus_vcpu(CPUState *cpu, int code, void *addr)
3667 {
3668 #ifdef KVM_HAVE_MCE_INJECTION
3669 if (have_sigbus_pending) {
3670 return 1;
3671 }
3672 have_sigbus_pending = true;
3673 pending_sigbus_addr = addr;
3674 pending_sigbus_code = code;
3675 qatomic_set(&cpu->exit_request, 1);
3676 return 0;
3677 #else
3678 return 1;
3679 #endif
3680 }
3681
3682 /* Called synchronously (via signalfd) in main thread. */
3683 int kvm_on_sigbus(int code, void *addr)
3684 {
3685 #ifdef KVM_HAVE_MCE_INJECTION
3686 /* Action required MCE kills the process if SIGBUS is blocked. Because
3687 * that's what happens in the I/O thread, where we handle MCE via signalfd,
3688 * we can only get action optional here.
3689 */
3690 assert(code != BUS_MCEERR_AR);
3691 kvm_arch_on_sigbus_vcpu(first_cpu, code, addr);
3692 return 0;
3693 #else
3694 return 1;
3695 #endif
3696 }
3697
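/*
 * Create an in-kernel KVM device of @type, or with @test just probe
 * whether it could be created (KVM_CREATE_DEVICE_TEST).  Returns the
 * new device fd (or 0 in test mode) on success, -ENOTSUP when
 * KVM_CAP_DEVICE_CTRL is missing, or another negative errno.
 */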
3698 int kvm_create_device(KVMState *s, uint64_t type, bool test)
3699 {
3700 int ret;
3701 struct kvm_create_device create_dev;
3702
3703 create_dev.type = type;
3704 create_dev.fd = -1;
3705 create_dev.flags = test ? KVM_CREATE_DEVICE_TEST : 0;
3706
3707 if (!kvm_check_extension(s, KVM_CAP_DEVICE_CTRL)) {
3708 return -ENOTSUP;
3709 }
3710
3711 ret = kvm_vm_ioctl(s, KVM_CREATE_DEVICE, &create_dev);
3712 if (ret) {
3713 return ret;
3714 }
3715
3716 return test ? 0 : create_dev.fd;
3717 }
3718
3719 bool kvm_device_supported(int vmfd, uint64_t type)
3720 {
3721 struct kvm_create_device create_dev = {
3722 .type = type,
3723 .fd = -1,
3724 .flags = KVM_CREATE_DEVICE_TEST,
3725 };
3726
3727 if (ioctl(vmfd, KVM_CHECK_EXTENSION, KVM_CAP_DEVICE_CTRL) <= 0) {
3728 return false;
3729 }
3730
3731 return (ioctl(vmfd, KVM_CREATE_DEVICE, &create_dev) >= 0);
3732 }
3733
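/*
 * Set/get a single vCPU register through KVM_SET_ONE_REG/KVM_GET_ONE_REG.
 * @id encodes the architecture, register size and register number as
 * defined in the KVM UAPI; @source/@target point at a buffer of that
 * size.
 */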
3734 int kvm_set_one_reg(CPUState *cs, uint64_t id, void *source)
3735 {
3736 struct kvm_one_reg reg;
3737 int r;
3738
3739 reg.id = id;
3740 reg.addr = (uintptr_t) source;
3741 r = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
3742 if (r) {
3743 trace_kvm_failed_reg_set(id, strerror(-r));
3744 }
3745 return r;
3746 }
3747
3748 int kvm_get_one_reg(CPUState *cs, uint64_t id, void *target)
3749 {
3750 struct kvm_one_reg reg;
3751 int r;
3752
3753 reg.id = id;
3754 reg.addr = (uintptr_t) target;
3755 r = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
3756 if (r) {
3757 trace_kvm_failed_reg_get(id, strerror(-r));
3758 }
3759 return r;
3760 }
3761
3762 static bool kvm_accel_has_memory(MachineState *ms, AddressSpace *as,
3763 hwaddr start_addr, hwaddr size)
3764 {
3765 KVMState *kvm = KVM_STATE(ms->accelerator);
3766 int i;
3767
3768 for (i = 0; i < kvm->nr_as; ++i) {
3769 if (kvm->as[i].as == as && kvm->as[i].ml) {
3770 size = MIN(kvm_max_slot_size, size);
3771 return NULL != kvm_lookup_matching_slot(kvm->as[i].ml,
3772 start_addr, size);
3773 }
3774 }
3775
3776 return false;
3777 }
3778
3779 static void kvm_get_kvm_shadow_mem(Object *obj, Visitor *v,
3780 const char *name, void *opaque,
3781 Error **errp)
3782 {
3783 KVMState *s = KVM_STATE(obj);
3784 int64_t value = s->kvm_shadow_mem;
3785
3786 visit_type_int(v, name, &value, errp);
3787 }
3788
3789 static void kvm_set_kvm_shadow_mem(Object *obj, Visitor *v,
3790 const char *name, void *opaque,
3791 Error **errp)
3792 {
3793 KVMState *s = KVM_STATE(obj);
3794 int64_t value;
3795
3796 if (s->fd != -1) {
3797 error_setg(errp, "Cannot set properties after the accelerator has been initialized");
3798 return;
3799 }
3800
3801 if (!visit_type_int(v, name, &value, errp)) {
3802 return;
3803 }
3804
3805 s->kvm_shadow_mem = value;
3806 }
3807
3808 static void kvm_set_kernel_irqchip(Object *obj, Visitor *v,
3809 const char *name, void *opaque,
3810 Error **errp)
3811 {
3812 KVMState *s = KVM_STATE(obj);
3813 OnOffSplit mode;
3814
3815 if (s->fd != -1) {
3816 error_setg(errp, "Cannot set properties after the accelerator has been initialized");
3817 return;
3818 }
3819
3820 if (!visit_type_OnOffSplit(v, name, &mode, errp)) {
3821 return;
3822 }
3823 switch (mode) {
3824 case ON_OFF_SPLIT_ON:
3825 s->kernel_irqchip_allowed = true;
3826 s->kernel_irqchip_required = true;
3827 s->kernel_irqchip_split = ON_OFF_AUTO_OFF;
3828 break;
3829 case ON_OFF_SPLIT_OFF:
3830 s->kernel_irqchip_allowed = false;
3831 s->kernel_irqchip_required = false;
3832 s->kernel_irqchip_split = ON_OFF_AUTO_OFF;
3833 break;
3834 case ON_OFF_SPLIT_SPLIT:
3835 s->kernel_irqchip_allowed = true;
3836 s->kernel_irqchip_required = true;
3837 s->kernel_irqchip_split = ON_OFF_AUTO_ON;
3838 break;
3839 default:
3840 /* The value was checked in visit_type_OnOffSplit() above. If
3841 * we get here, then something is wrong in QEMU.
3842 */
3843 abort();
3844 }
3845 }
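/*
 * This property backs "-accel kvm,kernel-irqchip=...", e.g.:
 *
 *     qemu-system-x86_64 -accel kvm,kernel-irqchip=split
 *
 * On x86, "split" keeps the local APIC in the kernel while the
 * IOAPIC and PIC are emulated in userspace.
 */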
3846
3847 bool kvm_kernel_irqchip_allowed(void)
3848 {
3849 return kvm_state->kernel_irqchip_allowed;
3850 }
3851
3852 bool kvm_kernel_irqchip_required(void)
3853 {
3854 return kvm_state->kernel_irqchip_required;
3855 }
3856
3857 bool kvm_kernel_irqchip_split(void)
3858 {
3859 return kvm_state->kernel_irqchip_split == ON_OFF_AUTO_ON;
3860 }
3861
3862 static void kvm_get_dirty_ring_size(Object *obj, Visitor *v,
3863 const char *name, void *opaque,
3864 Error **errp)
3865 {
3866 KVMState *s = KVM_STATE(obj);
3867 uint32_t value = s->kvm_dirty_ring_size;
3868
3869 visit_type_uint32(v, name, &value, errp);
3870 }
3871
3872 static void kvm_set_dirty_ring_size(Object *obj, Visitor *v,
3873 const char *name, void *opaque,
3874 Error **errp)
3875 {
3876 KVMState *s = KVM_STATE(obj);
3877 uint32_t value;
3878
3879 if (s->fd != -1) {
3880 error_setg(errp, "Cannot set properties after the accelerator has been initialized");
3881 return;
3882 }
3883
3884 if (!visit_type_uint32(v, name, &value, errp)) {
3885 return;
3886 }
3887 if (value & (value - 1)) {
3888 error_setg(errp, "dirty-ring-size must be a power of two.");
3889 return;
3890 }
3891
3892 s->kvm_dirty_ring_size = value;
3893 }
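/*
 * Example invocation, assuming the host kernel supports the dirty
 * ring (KVM_CAP_DIRTY_LOG_RING):
 *
 *     qemu-system-x86_64 -accel kvm,dirty-ring-size=4096
 *
 * The value is the number of ring entries per vCPU; beyond the
 * power-of-two check here, the kernel enforces its own bounds when
 * the ring is enabled.
 */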
3894
3895 static char *kvm_get_device(Object *obj,
3896 Error **errp G_GNUC_UNUSED)
3897 {
3898 KVMState *s = KVM_STATE(obj);
3899
3900 return g_strdup(s->device);
3901 }
3902
3903 static void kvm_set_device(Object *obj,
3904 const char *value,
3905 Error **errp G_GNUC_UNUSED)
3906 {
3907 KVMState *s = KVM_STATE(obj);
3908
3909 g_free(s->device);
3910 s->device = g_strdup(value);
3911 }
3912
3913 static void kvm_set_kvm_rapl(Object *obj, bool value, Error **errp)
3914 {
3915 KVMState *s = KVM_STATE(obj);
3916 s->msr_energy.enable = value;
3917 }
3918
3919 static void kvm_set_kvm_rapl_socket_path(Object *obj,
3920 const char *str,
3921 Error **errp)
3922 {
3923 KVMState *s = KVM_STATE(obj);
3924 g_free(s->msr_energy.socket_path);
3925 s->msr_energy.socket_path = g_strdup(str);
3926 }
3927
3928 static void kvm_accel_instance_init(Object *obj)
3929 {
3930 KVMState *s = KVM_STATE(obj);
3931
3932 s->fd = -1;
3933 s->vmfd = -1;
3934 s->kvm_shadow_mem = -1;
3935 s->kernel_irqchip_allowed = true;
3936 s->kernel_irqchip_split = ON_OFF_AUTO_AUTO;
3937 /* KVM dirty ring is by default off */
3938 s->kvm_dirty_ring_size = 0;
3939 s->kvm_dirty_ring_with_bitmap = false;
3940 s->kvm_eager_split_size = 0;
3941 s->notify_vmexit = NOTIFY_VMEXIT_OPTION_RUN;
3942 s->notify_window = 0;
3943 s->xen_version = 0;
3944 s->xen_gnttab_max_frames = 64;
3945 s->xen_evtchn_max_pirq = 256;
3946 s->device = NULL;
3947 s->msr_energy.enable = false;
3948 }
3949
3950 /**
3951 * kvm_gdbstub_sstep_flags():
3952 *
3953 * Returns: SSTEP_* flags that KVM supports for guest debug. The
3954 * support is probed during kvm_init()
3955 */
3956 static int kvm_gdbstub_sstep_flags(void)
3957 {
3958 return kvm_sstep_flags;
3959 }
3960
3961 static void kvm_accel_class_init(ObjectClass *oc, void *data)
3962 {
3963 AccelClass *ac = ACCEL_CLASS(oc);
3964 ac->name = "KVM";
3965 ac->init_machine = kvm_init;
3966 ac->has_memory = kvm_accel_has_memory;
3967 ac->allowed = &kvm_allowed;
3968 ac->gdbstub_supported_sstep_flags = kvm_gdbstub_sstep_flags;
3969
3970 object_class_property_add(oc, "kernel-irqchip", "on|off|split",
3971 NULL, kvm_set_kernel_irqchip,
3972 NULL, NULL);
3973 object_class_property_set_description(oc, "kernel-irqchip",
3974 "Configure KVM in-kernel irqchip");
3975
3976 object_class_property_add(oc, "kvm-shadow-mem", "int",
3977 kvm_get_kvm_shadow_mem, kvm_set_kvm_shadow_mem,
3978 NULL, NULL);
3979 object_class_property_set_description(oc, "kvm-shadow-mem",
3980 "KVM shadow MMU size");
3981
3982 object_class_property_add(oc, "dirty-ring-size", "uint32",
3983 kvm_get_dirty_ring_size, kvm_set_dirty_ring_size,
3984 NULL, NULL);
3985 object_class_property_set_description(oc, "dirty-ring-size",
3986 "Size of KVM dirty page ring buffer (default: 0, i.e. use bitmap)");
3987
3988 object_class_property_add_str(oc, "device", kvm_get_device, kvm_set_device);
3989 object_class_property_set_description(oc, "device",
3990 "Path to the device node to use (default: /dev/kvm)");
3991
3992 object_class_property_add_bool(oc, "rapl",
3993 NULL,
3994 kvm_set_kvm_rapl);
3995 object_class_property_set_description(oc, "rapl",
3996 "Allow energy related MSRs for RAPL interface in Guest");
3997
3998 object_class_property_add_str(oc, "rapl-helper-socket", NULL,
3999 kvm_set_kvm_rapl_socket_path);
4000 object_class_property_set_description(oc, "rapl-helper-socket",
4001 "Socket Path for comminucating with the Virtual MSR helper daemon");
4002
4003 kvm_arch_accel_class_init(oc);
4004 }
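/*
 * The properties registered above can be combined on one command
 * line, for example:
 *
 *     qemu-system-x86_64 -accel kvm,kernel-irqchip=split,dirty-ring-size=4096
 */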
4005
4006 static const TypeInfo kvm_accel_type = {
4007 .name = TYPE_KVM_ACCEL,
4008 .parent = TYPE_ACCEL,
4009 .instance_init = kvm_accel_instance_init,
4010 .class_init = kvm_accel_class_init,
4011 .instance_size = sizeof(KVMState),
4012 };
4013
4014 static void kvm_type_init(void)
4015 {
4016 type_register_static(&kvm_accel_type);
4017 }
4018
4019 type_init(kvm_type_init);
4020
4021 typedef struct StatsArgs {
4022 union StatsResultsType {
4023 StatsResultList **stats;
4024 StatsSchemaList **schema;
4025 } result;
4026 strList *names;
4027 Error **errp;
4028 } StatsArgs;
4029
4030 static StatsList *add_kvmstat_entry(struct kvm_stats_desc *pdesc,
4031 uint64_t *stats_data,
4032 StatsList *stats_list,
4033 Error **errp)
4034 {
4035
4036 Stats *stats;
4037 uint64List *val_list = NULL;
4038
4039 /* Only add stats that we understand. */
4040 switch (pdesc->flags & KVM_STATS_TYPE_MASK) {
4041 case KVM_STATS_TYPE_CUMULATIVE:
4042 case KVM_STATS_TYPE_INSTANT:
4043 case KVM_STATS_TYPE_PEAK:
4044 case KVM_STATS_TYPE_LINEAR_HIST:
4045 case KVM_STATS_TYPE_LOG_HIST:
4046 break;
4047 default:
4048 return stats_list;
4049 }
4050
4051 switch (pdesc->flags & KVM_STATS_UNIT_MASK) {
4052 case KVM_STATS_UNIT_NONE:
4053 case KVM_STATS_UNIT_BYTES:
4054 case KVM_STATS_UNIT_CYCLES:
4055 case KVM_STATS_UNIT_SECONDS:
4056 case KVM_STATS_UNIT_BOOLEAN:
4057 break;
4058 default:
4059 return stats_list;
4060 }
4061
4062 switch (pdesc->flags & KVM_STATS_BASE_MASK) {
4063 case KVM_STATS_BASE_POW10:
4064 case KVM_STATS_BASE_POW2:
4065 break;
4066 default:
4067 return stats_list;
4068 }
4069
4070 /* Alloc and populate data list */
4071 stats = g_new0(Stats, 1);
4072 stats->name = g_strdup(pdesc->name);
4073 stats->value = g_new0(StatsValue, 1);
4074
4075 if ((pdesc->flags & KVM_STATS_UNIT_MASK) == KVM_STATS_UNIT_BOOLEAN) {
4076 stats->value->u.boolean = *stats_data;
4077 stats->value->type = QTYPE_QBOOL;
4078 } else if (pdesc->size == 1) {
4079 stats->value->u.scalar = *stats_data;
4080 stats->value->type = QTYPE_QNUM;
4081 } else {
4082 int i;
4083 for (i = 0; i < pdesc->size; i++) {
4084 QAPI_LIST_PREPEND(val_list, stats_data[i]);
4085 }
4086 stats->value->u.list = val_list;
4087 stats->value->type = QTYPE_QLIST;
4088 }
4089
4090 QAPI_LIST_PREPEND(stats_list, stats);
4091 return stats_list;
4092 }
4093
4094 static StatsSchemaValueList *add_kvmschema_entry(struct kvm_stats_desc *pdesc,
4095 StatsSchemaValueList *list,
4096 Error **errp)
4097 {
4098 StatsSchemaValueList *schema_entry = g_new0(StatsSchemaValueList, 1);
4099 schema_entry->value = g_new0(StatsSchemaValue, 1);
4100
4101 switch (pdesc->flags & KVM_STATS_TYPE_MASK) {
4102 case KVM_STATS_TYPE_CUMULATIVE:
4103 schema_entry->value->type = STATS_TYPE_CUMULATIVE;
4104 break;
4105 case KVM_STATS_TYPE_INSTANT:
4106 schema_entry->value->type = STATS_TYPE_INSTANT;
4107 break;
4108 case KVM_STATS_TYPE_PEAK:
4109 schema_entry->value->type = STATS_TYPE_PEAK;
4110 break;
4111 case KVM_STATS_TYPE_LINEAR_HIST:
4112 schema_entry->value->type = STATS_TYPE_LINEAR_HISTOGRAM;
4113 schema_entry->value->bucket_size = pdesc->bucket_size;
4114 schema_entry->value->has_bucket_size = true;
4115 break;
4116 case KVM_STATS_TYPE_LOG_HIST:
4117 schema_entry->value->type = STATS_TYPE_LOG2_HISTOGRAM;
4118 break;
4119 default:
4120 goto exit;
4121 }
4122
4123 switch (pdesc->flags & KVM_STATS_UNIT_MASK) {
4124 case KVM_STATS_UNIT_NONE:
4125 break;
4126 case KVM_STATS_UNIT_BOOLEAN:
4127 schema_entry->value->has_unit = true;
4128 schema_entry->value->unit = STATS_UNIT_BOOLEAN;
4129 break;
4130 case KVM_STATS_UNIT_BYTES:
4131 schema_entry->value->has_unit = true;
4132 schema_entry->value->unit = STATS_UNIT_BYTES;
4133 break;
4134 case KVM_STATS_UNIT_CYCLES:
4135 schema_entry->value->has_unit = true;
4136 schema_entry->value->unit = STATS_UNIT_CYCLES;
4137 break;
4138 case KVM_STATS_UNIT_SECONDS:
4139 schema_entry->value->has_unit = true;
4140 schema_entry->value->unit = STATS_UNIT_SECONDS;
4141 break;
4142 default:
4143 goto exit;
4144 }
4145
4146 schema_entry->value->exponent = pdesc->exponent;
4147 if (pdesc->exponent) {
4148 switch (pdesc->flags & KVM_STATS_BASE_MASK) {
4149 case KVM_STATS_BASE_POW10:
4150 schema_entry->value->has_base = true;
4151 schema_entry->value->base = 10;
4152 break;
4153 case KVM_STATS_BASE_POW2:
4154 schema_entry->value->has_base = true;
4155 schema_entry->value->base = 2;
4156 break;
4157 default:
4158 goto exit;
4159 }
4160 }
4161
4162 schema_entry->value->name = g_strdup(pdesc->name);
4163 schema_entry->next = list;
4164 return schema_entry;
4165 exit:
4166 g_free(schema_entry->value);
4167 g_free(schema_entry);
4168 return list;
4169 }
4170
4171 /* Cached stats descriptors */
4172 typedef struct StatsDescriptors {
4173 const char *ident; /* cache key, currently the StatsTarget */
4174 struct kvm_stats_desc *kvm_stats_desc;
4175 struct kvm_stats_header kvm_stats_header;
4176 QTAILQ_ENTRY(StatsDescriptors) next;
4177 } StatsDescriptors;
4178
4179 static QTAILQ_HEAD(, StatsDescriptors) stats_descriptors =
4180 QTAILQ_HEAD_INITIALIZER(stats_descriptors);
4181
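/*
 * Layout of a KVM binary stats fd, as consumed below: a struct
 * kvm_stats_header at offset 0, an array of struct kvm_stats_desc
 * (each desc immediately followed by name_size bytes of name) at
 * desc_offset, and the raw u64 values at data_offset.  The
 * descriptors are immutable for the lifetime of the fd, which is
 * what makes caching them per target safe.
 */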
4182 /*
4183 * Return the descriptors for 'target': either the cached ones, or
4184 * those freshly read from 'stats_fd'.
4185 */
4186 static StatsDescriptors *find_stats_descriptors(StatsTarget target, int stats_fd,
4187 Error **errp)
4188 {
4189 StatsDescriptors *descriptors;
4190 const char *ident;
4191 struct kvm_stats_desc *kvm_stats_desc;
4192 struct kvm_stats_header *kvm_stats_header;
4193 size_t size_desc;
4194 ssize_t ret;
4195
4196 ident = StatsTarget_str(target);
4197 QTAILQ_FOREACH(descriptors, &stats_descriptors, next) {
4198 if (g_str_equal(descriptors->ident, ident)) {
4199 return descriptors;
4200 }
4201 }
4202
4203 descriptors = g_new0(StatsDescriptors, 1);
4204
4205 /* Read stats header */
4206 kvm_stats_header = &descriptors->kvm_stats_header;
4207 ret = pread(stats_fd, kvm_stats_header, sizeof(*kvm_stats_header), 0);
4208 if (ret != sizeof(*kvm_stats_header)) {
4209 error_setg(errp, "KVM stats: failed to read stats header: "
4210 "expected %zu actual %zu",
4211 sizeof(*kvm_stats_header), ret);
4212 g_free(descriptors);
4213 return NULL;
4214 }
4215 size_desc = sizeof(*kvm_stats_desc) + kvm_stats_header->name_size;
4216
4217 /* Read stats descriptors */
4218 kvm_stats_desc = g_malloc0_n(kvm_stats_header->num_desc, size_desc);
4219 ret = pread(stats_fd, kvm_stats_desc,
4220 size_desc * kvm_stats_header->num_desc,
4221 kvm_stats_header->desc_offset);
4222
4223 if (ret != size_desc * kvm_stats_header->num_desc) {
4224 error_setg(errp, "KVM stats: failed to read stats descriptors: "
4225 "expected %zu actual %zu",
4226 size_desc * kvm_stats_header->num_desc, ret);
4227 g_free(descriptors);
4228 g_free(kvm_stats_desc);
4229 return NULL;
4230 }
4231 descriptors->kvm_stats_desc = kvm_stats_desc;
4232 descriptors->ident = ident;
4233 QTAILQ_INSERT_TAIL(&stats_descriptors, descriptors, next);
4234 return descriptors;
4235 }
4236
4237 static void query_stats(StatsResultList **result, StatsTarget target,
4238 strList *names, int stats_fd, CPUState *cpu,
4239 Error **errp)
4240 {
4241 struct kvm_stats_desc *kvm_stats_desc;
4242 struct kvm_stats_header *kvm_stats_header;
4243 StatsDescriptors *descriptors;
4244 g_autofree uint64_t *stats_data = NULL;
4245 struct kvm_stats_desc *pdesc;
4246 StatsList *stats_list = NULL;
4247 size_t size_desc, size_data = 0;
4248 ssize_t ret;
4249 int i;
4250
4251 descriptors = find_stats_descriptors(target, stats_fd, errp);
4252 if (!descriptors) {
4253 return;
4254 }
4255
4256 kvm_stats_header = &descriptors->kvm_stats_header;
4257 kvm_stats_desc = descriptors->kvm_stats_desc;
4258 size_desc = sizeof(*kvm_stats_desc) + kvm_stats_header->name_size;
4259
4260 /* Tally the total data size */
4261 for (i = 0; i < kvm_stats_header->num_desc; ++i) {
4262 pdesc = (void *)kvm_stats_desc + i * size_desc;
4263 size_data += pdesc->size * sizeof(*stats_data);
4264 }
4265
4266 stats_data = g_malloc0(size_data);
4267 ret = pread(stats_fd, stats_data, size_data, kvm_stats_header->data_offset);
4268
4269 if (ret != size_data) {
4270 error_setg(errp, "KVM stats: failed to read data: "
4271 "expected %zu actual %zu", size_data, ret);
4272 return;
4273 }
4274
4275 for (i = 0; i < kvm_stats_header->num_desc; ++i) {
4276 uint64_t *stats;
4277 pdesc = (void *)kvm_stats_desc + i * size_desc;
4278
4279 /* Add entry to the list */
4280 stats = (void *)stats_data + pdesc->offset;
4281 if (!apply_str_list_filter(pdesc->name, names)) {
4282 continue;
4283 }
4284 stats_list = add_kvmstat_entry(pdesc, stats, stats_list, errp);
4285 }
4286
4287 if (!stats_list) {
4288 return;
4289 }
4290
4291 switch (target) {
4292 case STATS_TARGET_VM:
4293 add_stats_entry(result, STATS_PROVIDER_KVM, NULL, stats_list);
4294 break;
4295 case STATS_TARGET_VCPU:
4296 add_stats_entry(result, STATS_PROVIDER_KVM,
4297 cpu->parent_obj.canonical_path,
4298 stats_list);
4299 break;
4300 default:
4301 g_assert_not_reached();
4302 }
4303 }
4304
4305 static void query_stats_schema(StatsSchemaList **result, StatsTarget target,
4306 int stats_fd, Error **errp)
4307 {
4308 struct kvm_stats_desc *kvm_stats_desc;
4309 struct kvm_stats_header *kvm_stats_header;
4310 StatsDescriptors *descriptors;
4311 struct kvm_stats_desc *pdesc;
4312 StatsSchemaValueList *stats_list = NULL;
4313 size_t size_desc;
4314 int i;
4315
4316 descriptors = find_stats_descriptors(target, stats_fd, errp);
4317 if (!descriptors) {
4318 return;
4319 }
4320
4321 kvm_stats_header = &descriptors->kvm_stats_header;
4322 kvm_stats_desc = descriptors->kvm_stats_desc;
4323 size_desc = sizeof(*kvm_stats_desc) + kvm_stats_header->name_size;
4324
4325 /* Walk the descriptors and build the schema list */
4326 for (i = 0; i < kvm_stats_header->num_desc; ++i) {
4327 pdesc = (void *)kvm_stats_desc + i * size_desc;
4328 stats_list = add_kvmschema_entry(pdesc, stats_list, errp);
4329 }
4330
4331 add_stats_schema(result, STATS_PROVIDER_KVM, target, stats_list);
4332 }
4333
4334 static void query_stats_vcpu(CPUState *cpu, StatsArgs *kvm_stats_args)
4335 {
4336 int stats_fd = cpu->kvm_vcpu_stats_fd;
4337 Error *local_err = NULL;
4338
4339 if (stats_fd == -1) {
4340 error_setg_errno(&local_err, errno, "KVM stats: ioctl failed");
4341 error_propagate(kvm_stats_args->errp, local_err);
4342 return;
4343 }
4344 query_stats(kvm_stats_args->result.stats, STATS_TARGET_VCPU,
4345 kvm_stats_args->names, stats_fd, cpu,
4346 kvm_stats_args->errp);
4347 }
4348
4349 static void query_stats_schema_vcpu(CPUState *cpu, StatsArgs *kvm_stats_args)
4350 {
4351 int stats_fd = cpu->kvm_vcpu_stats_fd;
4352 Error *local_err = NULL;
4353
4354 if (stats_fd == -1) {
4355 error_setg_errno(&local_err, errno, "KVM stats: ioctl failed");
4356 error_propagate(kvm_stats_args->errp, local_err);
4357 return;
4358 }
4359 query_stats_schema(kvm_stats_args->result.schema, STATS_TARGET_VCPU, stats_fd,
4360 kvm_stats_args->errp);
4361 }
4362
4363 static void query_stats_cb(StatsResultList **result, StatsTarget target,
4364 strList *names, strList *targets, Error **errp)
4365 {
4366 KVMState *s = kvm_state;
4367 CPUState *cpu;
4368 int stats_fd;
4369
4370 switch (target) {
4371 case STATS_TARGET_VM:
4372 {
4373 stats_fd = kvm_vm_ioctl(s, KVM_GET_STATS_FD, NULL);
4374 if (stats_fd == -1) {
4375 error_setg_errno(errp, errno, "KVM stats: ioctl failed");
4376 return;
4377 }
4378 query_stats(result, target, names, stats_fd, NULL, errp);
4379 close(stats_fd);
4380 break;
4381 }
4382 case STATS_TARGET_VCPU:
4383 {
4384 StatsArgs stats_args;
4385 stats_args.result.stats = result;
4386 stats_args.names = names;
4387 stats_args.errp = errp;
4388 CPU_FOREACH(cpu) {
4389 if (!apply_str_list_filter(cpu->parent_obj.canonical_path, targets)) {
4390 continue;
4391 }
4392 query_stats_vcpu(cpu, &stats_args);
4393 }
4394 break;
4395 }
4396 default:
4397 break;
4398 }
4399 }
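/*
 * QMP usage sketch (argument shape per qapi/stats.json):
 *
 *     { "execute": "query-stats",
 *       "arguments": { "target": "vm",
 *                      "providers": [ { "provider": "kvm" } ] } }
 *
 * arrives here with target == STATS_TARGET_VM; per-vCPU queries use
 * "target": "vcpu" plus an optional "vcpus" path list.
 */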
4400
4401 void query_stats_schemas_cb(StatsSchemaList **result, Error **errp)
4402 {
4403 StatsArgs stats_args;
4404 KVMState *s = kvm_state;
4405 int stats_fd;
4406
4407 stats_fd = kvm_vm_ioctl(s, KVM_GET_STATS_FD, NULL);
4408 if (stats_fd == -1) {
4409 error_setg_errno(errp, errno, "KVM stats: ioctl failed");
4410 return;
4411 }
4412 query_stats_schema(result, STATS_TARGET_VM, stats_fd, errp);
4413 close(stats_fd);
4414
4415 if (first_cpu) {
4416 stats_args.result.schema = result;
4417 stats_args.errp = errp;
4418 query_stats_schema_vcpu(first_cpu, &stats_args);
4419 }
4420 }
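/*
 * QMP usage sketch:
 *
 *     { "execute": "query-stats-schemas",
 *       "arguments": { "provider": "kvm" } }
 *
 * One schema is returned for the VM and, because all vCPUs share the
 * same schema, a single vCPU schema probed via first_cpu.
 */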
4421
4422 void kvm_mark_guest_state_protected(void)
4423 {
4424 kvm_state->guest_state_protected = true;
4425 }
4426
4427 int kvm_create_guest_memfd(uint64_t size, uint64_t flags, Error **errp)
4428 {
4429 int fd;
4430 struct kvm_create_guest_memfd guest_memfd = {
4431 .size = size,
4432 .flags = flags,
4433 };
4434
4435 if (!kvm_guest_memfd_supported) {
4436 error_setg(errp, "KVM does not support guest_memfd");
4437 return -1;
4438 }
4439
4440 fd = kvm_vm_ioctl(kvm_state, KVM_CREATE_GUEST_MEMFD, &guest_memfd);
4441 if (fd < 0) {
4442 error_setg_errno(errp, errno, "Error creating KVM guest_memfd");
4443 return -1;
4444 }
4445
4446 return fd;
4447 }
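/*
 * Usage sketch: the returned fd typically backs the private portion
 * of a confidential guest's RAM; "ram_size" below is a placeholder.
 *
 *     Error *local_err = NULL;
 *     int fd = kvm_create_guest_memfd(ram_size, 0, &local_err);
 *     if (fd < 0) {
 *         error_report_err(local_err);
 *     }
 */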
4448