/*
 * ARM implementation of KVM hooks
 *
 * Copyright Christoffer Dall 2009-2010
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include <sys/ioctl.h>

#include <linux/kvm.h>

#include "qemu/timer.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "qom/object.h"
#include "qapi/error.h"
#include "sysemu/sysemu.h"
#include "sysemu/kvm.h"
#include "sysemu/kvm_int.h"
#include "kvm_arm.h"
#include "cpu.h"
#include "trace.h"
#include "internals.h"
#include "hw/pci/pci.h"
#include "exec/memattrs.h"
#include "exec/address-spaces.h"
#include "hw/boards.h"
#include "hw/irq.h"
#include "qapi/visitor.h"
#include "qemu/log.h"

const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
    KVM_CAP_LAST_INFO
};

static bool cap_has_mp_state;
static bool cap_has_inject_serror_esr;
static bool cap_has_inject_ext_dabt;

static ARMHostCPUFeatures arm_host_cpu_features;

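/*
 * kvm_arm_vcpu_init: issue KVM_ARM_VCPU_INIT for this vCPU, using the
 * target and feature bits previously selected for it (for example by
 * kvm_arm_set_cpu_features_from_host()).
 */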
int kvm_arm_vcpu_init(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    struct kvm_vcpu_init init;

    init.target = cpu->kvm_target;
    memcpy(init.features, cpu->kvm_init_features, sizeof(init.features));

    return kvm_vcpu_ioctl(cs, KVM_ARM_VCPU_INIT, &init);
}

int kvm_arm_vcpu_finalize(CPUState *cs, int feature)
{
    return kvm_vcpu_ioctl(cs, KVM_ARM_VCPU_FINALIZE, &feature);
}
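
/*
 * Usage note (a sketch, assuming the SVE case): some features must be
 * finalized before the vCPU can run, e.g.
 *
 *     kvm_arm_vcpu_finalize(cs, KVM_ARM_VCPU_SVE);
 *
 * once the vector lengths have been configured.
 */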

void kvm_arm_init_serror_injection(CPUState *cs)
{
    cap_has_inject_serror_esr = kvm_check_extension(cs->kvm_state,
                                    KVM_CAP_ARM_INJECT_SERROR_ESR);
}

bool kvm_arm_create_scratch_host_vcpu(const uint32_t *cpus_to_try,
                                      int *fdarray,
                                      struct kvm_vcpu_init *init)
{
    int ret = 0, kvmfd = -1, vmfd = -1, cpufd = -1;
    int max_vm_pa_size;

    kvmfd = qemu_open_old("/dev/kvm", O_RDWR);
    if (kvmfd < 0) {
        goto err;
    }
    max_vm_pa_size = ioctl(kvmfd, KVM_CHECK_EXTENSION, KVM_CAP_ARM_VM_IPA_SIZE);
    if (max_vm_pa_size < 0) {
        max_vm_pa_size = 0;
    }
    do {
        vmfd = ioctl(kvmfd, KVM_CREATE_VM, max_vm_pa_size);
    } while (vmfd == -1 && errno == EINTR);
    if (vmfd < 0) {
        goto err;
    }
    cpufd = ioctl(vmfd, KVM_CREATE_VCPU, 0);
    if (cpufd < 0) {
        goto err;
    }

    if (!init) {
        /* Caller doesn't want the VCPU to be initialized, so skip it */
        goto finish;
    }

    if (init->target == -1) {
        struct kvm_vcpu_init preferred;

        ret = ioctl(vmfd, KVM_ARM_PREFERRED_TARGET, &preferred);
        if (!ret) {
            init->target = preferred.target;
        }
    }
    if (ret >= 0) {
        ret = ioctl(cpufd, KVM_ARM_VCPU_INIT, init);
        if (ret < 0) {
            goto err;
        }
    } else if (cpus_to_try) {
        /* Old kernel which doesn't know about the
         * PREFERRED_TARGET ioctl: we know it will only support
         * creating one kind of guest CPU which is its preferred
         * CPU type.
         */
        struct kvm_vcpu_init try;

        while (*cpus_to_try != QEMU_KVM_ARM_TARGET_NONE) {
            try.target = *cpus_to_try++;
            memcpy(try.features, init->features, sizeof(init->features));
            ret = ioctl(cpufd, KVM_ARM_VCPU_INIT, &try);
            if (ret >= 0) {
                break;
            }
        }
        if (ret < 0) {
            goto err;
        }
        init->target = try.target;
    } else {
        /* Treat a NULL cpus_to_try argument the same as an empty
         * list, which means we will fail the call since this must
         * be an old kernel which doesn't support PREFERRED_TARGET.
         */
        goto err;
    }

finish:
    fdarray[0] = kvmfd;
    fdarray[1] = vmfd;
    fdarray[2] = cpufd;

    return true;

err:
    if (cpufd >= 0) {
        close(cpufd);
    }
    if (vmfd >= 0) {
        close(vmfd);
    }
    if (kvmfd >= 0) {
        close(kvmfd);
    }

    return false;
}
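
/*
 * Illustrative caller (a hedged sketch, not code from this file): probing
 * a host feature with a scratch vCPU might look like
 *
 *     int fdarray[3];
 *     struct kvm_vcpu_init init = { .target = -1 };
 *
 *     if (kvm_arm_create_scratch_host_vcpu(NULL, fdarray, &init)) {
 *         // fdarray[2] is the scratch vCPU fd: query it, e.g. with
 *         // KVM_GET_ONE_REG, then tear everything down again.
 *         kvm_arm_destroy_scratch_host_vcpu(fdarray);
 *     }
 */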

void kvm_arm_destroy_scratch_host_vcpu(int *fdarray)
{
    int i;

    for (i = 2; i >= 0; i--) {
        close(fdarray[i]);
    }
}

void kvm_arm_set_cpu_features_from_host(ARMCPU *cpu)
{
    CPUARMState *env = &cpu->env;

    if (!arm_host_cpu_features.dtb_compatible) {
        if (!kvm_enabled() ||
            !kvm_arm_get_host_cpu_features(&arm_host_cpu_features)) {
            /* We can't report this error yet, so flag that we need to
             * report it in arm_cpu_realizefn().
             */
            cpu->kvm_target = QEMU_KVM_ARM_TARGET_NONE;
            cpu->host_cpu_probe_failed = true;
            return;
        }
    }

    cpu->kvm_target = arm_host_cpu_features.target;
    cpu->dtb_compatible = arm_host_cpu_features.dtb_compatible;
    cpu->isar = arm_host_cpu_features.isar;
    env->features = arm_host_cpu_features.features;
}

static bool kvm_no_adjvtime_get(Object *obj, Error **errp)
{
    return !ARM_CPU(obj)->kvm_adjvtime;
}

static void kvm_no_adjvtime_set(Object *obj, bool value, Error **errp)
{
    ARM_CPU(obj)->kvm_adjvtime = !value;
}

static bool kvm_steal_time_get(Object *obj, Error **errp)
{
    return ARM_CPU(obj)->kvm_steal_time != ON_OFF_AUTO_OFF;
}

static void kvm_steal_time_set(Object *obj, bool value, Error **errp)
{
    ARM_CPU(obj)->kvm_steal_time = value ? ON_OFF_AUTO_ON : ON_OFF_AUTO_OFF;
}

/* KVM VCPU properties should be prefixed with "kvm-". */
void kvm_arm_add_vcpu_properties(Object *obj)
{
    ARMCPU *cpu = ARM_CPU(obj);
    CPUARMState *env = &cpu->env;

    if (arm_feature(env, ARM_FEATURE_GENERIC_TIMER)) {
        cpu->kvm_adjvtime = true;
        object_property_add_bool(obj, "kvm-no-adjvtime", kvm_no_adjvtime_get,
                                 kvm_no_adjvtime_set);
        object_property_set_description(obj, "kvm-no-adjvtime",
                                        "Set on to disable the adjustment of "
                                        "the virtual counter; time during "
                                        "which the VM is stopped will then "
                                        "be counted.");
    }

    cpu->kvm_steal_time = ON_OFF_AUTO_AUTO;
    object_property_add_bool(obj, "kvm-steal-time", kvm_steal_time_get,
                             kvm_steal_time_set);
    object_property_set_description(obj, "kvm-steal-time",
                                    "Set off to disable KVM steal time.");
}
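
/*
 * Usage sketch: these are ordinary CPU properties, so they can be set on
 * the command line, e.g. "-cpu host,kvm-no-adjvtime=on" or
 * "-cpu host,kvm-steal-time=off".
 */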

bool kvm_arm_pmu_supported(void)
{
    return kvm_check_extension(kvm_state, KVM_CAP_ARM_PMU_V3);
}

int kvm_arm_get_max_vm_ipa_size(MachineState *ms, bool *fixed_ipa)
{
    KVMState *s = KVM_STATE(ms->accelerator);
    int ret;

    ret = kvm_check_extension(s, KVM_CAP_ARM_VM_IPA_SIZE);
    *fixed_ipa = ret <= 0;

    return ret > 0 ? ret : 40;
}

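/*
 * On Arm, the "type" argument of KVM_CREATE_VM encodes the requested IPA
 * size; returning 0 here selects the kernel's legacy fixed 40-bit IPA.
 */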
int kvm_arch_get_default_type(MachineState *ms)
{
    bool fixed_ipa;
    int size = kvm_arm_get_max_vm_ipa_size(ms, &fixed_ipa);
    return fixed_ipa ? 0 : size;
}

int kvm_arch_init(MachineState *ms, KVMState *s)
{
    int ret = 0;
    /* On ARM, interrupt delivery is always asynchronous,
     * whether we are using an in-kernel VGIC or not.
     */
    kvm_async_interrupts_allowed = true;

    /*
     * PSCI wakes up secondary cores, so we always need to
     * have vCPUs waiting in kernel space.
     */
    kvm_halt_in_kernel_allowed = true;

    cap_has_mp_state = kvm_check_extension(s, KVM_CAP_MP_STATE);

    if (ms->smp.cpus > 256 &&
        !kvm_check_extension(s, KVM_CAP_ARM_IRQ_LINE_LAYOUT_2)) {
        error_report("Using more than 256 vcpus requires a host kernel "
                     "with KVM_CAP_ARM_IRQ_LINE_LAYOUT_2");
        ret = -EINVAL;
    }

    if (kvm_check_extension(s, KVM_CAP_ARM_NISV_TO_USER)) {
        if (kvm_vm_enable_cap(s, KVM_CAP_ARM_NISV_TO_USER, 0)) {
            error_report("Failed to enable KVM_CAP_ARM_NISV_TO_USER cap");
        } else {
            /* Set status for supporting the external dabt injection */
            cap_has_inject_ext_dabt = kvm_check_extension(s,
                                    KVM_CAP_ARM_INJECT_EXT_DABT);
        }
    }

    if (s->kvm_eager_split_size) {
        uint32_t sizes;

        sizes = kvm_vm_check_extension(s, KVM_CAP_ARM_SUPPORTED_BLOCK_SIZES);
        if (!sizes) {
            s->kvm_eager_split_size = 0;
            warn_report("Eager Page Split support not available");
        } else if (!(s->kvm_eager_split_size & sizes)) {
            error_report("Eager Page Split requested chunk size not valid");
            ret = -EINVAL;
        } else {
            ret = kvm_vm_enable_cap(s, KVM_CAP_ARM_EAGER_SPLIT_CHUNK_SIZE, 0,
                                    s->kvm_eager_split_size);
            if (ret < 0) {
                error_report("Enabling of Eager Page Split failed: %s",
                             strerror(-ret));
            }
        }
    }

    kvm_arm_init_debug(s);

    return ret;
}

unsigned long kvm_arch_vcpu_id(CPUState *cpu)
{
    return cpu->cpu_index;
}

/* We track all the KVM devices which need their memory addresses
 * passed to the kernel in a list of these structures.
 * When board init is complete we run through the list and
 * tell the kernel the base addresses of the memory regions.
 * We use a MemoryListener to track mapping and unmapping of
 * the regions during board creation, so the board models don't
 * need to do anything special for the KVM case.
 *
 * Sometimes the address must be OR'ed with some other fields
 * (for example for KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION).
 * @kda_addr_ormask stores the value of those fields.
 */
typedef struct KVMDevice {
    struct kvm_arm_device_addr kda;
    struct kvm_device_attr kdattr;
    uint64_t kda_addr_ormask;
    MemoryRegion *mr;
    QSLIST_ENTRY(KVMDevice) entries;
    int dev_fd;
} KVMDevice;

static QSLIST_HEAD(, KVMDevice) kvm_devices_head;

static void kvm_arm_devlistener_add(MemoryListener *listener,
                                    MemoryRegionSection *section)
{
    KVMDevice *kd;

    QSLIST_FOREACH(kd, &kvm_devices_head, entries) {
        if (section->mr == kd->mr) {
            kd->kda.addr = section->offset_within_address_space;
        }
    }
}

static void kvm_arm_devlistener_del(MemoryListener *listener,
                                    MemoryRegionSection *section)
{
    KVMDevice *kd;

    QSLIST_FOREACH(kd, &kvm_devices_head, entries) {
        if (section->mr == kd->mr) {
            kd->kda.addr = -1;
        }
    }
}

static MemoryListener devlistener = {
    .name = "kvm-arm",
    .region_add = kvm_arm_devlistener_add,
    .region_del = kvm_arm_devlistener_del,
    .priority = MEMORY_LISTENER_PRIORITY_MIN,
};

static void kvm_arm_set_device_addr(KVMDevice *kd)
{
    struct kvm_device_attr *attr = &kd->kdattr;
    int ret;

    /* If the device control API is available and we have a device fd on the
     * KVMDevice struct, let's use the newer API
     */
    if (kd->dev_fd >= 0) {
        uint64_t addr = kd->kda.addr;

        addr |= kd->kda_addr_ormask;
        attr->addr = (uintptr_t)&addr;
        ret = kvm_device_ioctl(kd->dev_fd, KVM_SET_DEVICE_ATTR, attr);
    } else {
        ret = kvm_vm_ioctl(kvm_state, KVM_ARM_SET_DEVICE_ADDR, &kd->kda);
    }

    if (ret < 0) {
        fprintf(stderr, "Failed to set device address: %s\n",
                strerror(-ret));
        abort();
    }
}

static void kvm_arm_machine_init_done(Notifier *notifier, void *data)
{
    KVMDevice *kd, *tkd;

    QSLIST_FOREACH_SAFE(kd, &kvm_devices_head, entries, tkd) {
        if (kd->kda.addr != -1) {
            kvm_arm_set_device_addr(kd);
        }
        memory_region_unref(kd->mr);
        QSLIST_REMOVE_HEAD(&kvm_devices_head, entries);
        g_free(kd);
    }
    memory_listener_unregister(&devlistener);
}

static Notifier notify = {
    .notify = kvm_arm_machine_init_done,
};

void kvm_arm_register_device(MemoryRegion *mr, uint64_t devid, uint64_t group,
                             uint64_t attr, int dev_fd, uint64_t addr_ormask)
{
    KVMDevice *kd;

    if (!kvm_irqchip_in_kernel()) {
        return;
    }

    if (QSLIST_EMPTY(&kvm_devices_head)) {
        memory_listener_register(&devlistener, &address_space_memory);
        qemu_add_machine_init_done_notifier(&notify);
    }
    kd = g_new0(KVMDevice, 1);
    kd->mr = mr;
    kd->kda.id = devid;
    kd->kda.addr = -1;
    kd->kdattr.flags = 0;
    kd->kdattr.group = group;
    kd->kdattr.attr = attr;
    kd->dev_fd = dev_fd;
    kd->kda_addr_ormask = addr_ormask;
    QSLIST_INSERT_HEAD(&kvm_devices_head, kd, entries);
    memory_region_ref(kd->mr);
}
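
/*
 * Example caller (a sketch modelled on the in-kernel VGIC wrappers; the
 * field name s->iomem_dist is hypothetical): a GIC device model registers
 * each of its regions so the kernel learns the base address once the
 * board has mapped it, e.g.
 *
 *     kvm_arm_register_device(&s->iomem_dist, -1, KVM_DEV_ARM_VGIC_GRP_ADDR,
 *                             KVM_VGIC_V2_ADDR_TYPE_DIST, s->dev_fd, 0);
 */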

static int compare_u64(const void *a, const void *b)
{
    if (*(uint64_t *)a > *(uint64_t *)b) {
        return 1;
    }
    if (*(uint64_t *)a < *(uint64_t *)b) {
        return -1;
    }
    return 0;
}

/*
 * cpreg_values are sorted in ascending order by KVM register ID
 * (see kvm_arm_init_cpreg_list). This allows us to cheaply find
 * the storage for a KVM register by ID with a binary search.
 */
static uint64_t *kvm_arm_get_cpreg_ptr(ARMCPU *cpu, uint64_t regidx)
{
    uint64_t *res;

    res = bsearch(&regidx, cpu->cpreg_indexes, cpu->cpreg_array_len,
                  sizeof(uint64_t), compare_u64);
    assert(res);

    return &cpu->cpreg_values[res - cpu->cpreg_indexes];
}

/* Initialize the ARMCPU cpreg list according to the kernel's
 * definition of what CPU registers it knows about (and throw away
 * the previous TCG-created cpreg list).
 */
int kvm_arm_init_cpreg_list(ARMCPU *cpu)
{
    struct kvm_reg_list rl;
    struct kvm_reg_list *rlp;
    int i, ret, arraylen;
    CPUState *cs = CPU(cpu);

    rl.n = 0;
    ret = kvm_vcpu_ioctl(cs, KVM_GET_REG_LIST, &rl);
    if (ret != -E2BIG) {
        return ret;
    }
    rlp = g_malloc(sizeof(struct kvm_reg_list) + rl.n * sizeof(uint64_t));
    rlp->n = rl.n;
    ret = kvm_vcpu_ioctl(cs, KVM_GET_REG_LIST, rlp);
    if (ret) {
        goto out;
    }
    /* Sort the list we get back from the kernel, since cpreg_tuples
     * must be in strictly ascending order.
     */
    qsort(&rlp->reg, rlp->n, sizeof(rlp->reg[0]), compare_u64);

    for (i = 0, arraylen = 0; i < rlp->n; i++) {
        if (!kvm_arm_reg_syncs_via_cpreg_list(rlp->reg[i])) {
            continue;
        }
        switch (rlp->reg[i] & KVM_REG_SIZE_MASK) {
        case KVM_REG_SIZE_U32:
        case KVM_REG_SIZE_U64:
            break;
        default:
            fprintf(stderr, "Can't handle size of register in kernel list\n");
            ret = -EINVAL;
            goto out;
        }

        arraylen++;
    }

    cpu->cpreg_indexes = g_renew(uint64_t, cpu->cpreg_indexes, arraylen);
    cpu->cpreg_values = g_renew(uint64_t, cpu->cpreg_values, arraylen);
    cpu->cpreg_vmstate_indexes = g_renew(uint64_t, cpu->cpreg_vmstate_indexes,
                                         arraylen);
    cpu->cpreg_vmstate_values = g_renew(uint64_t, cpu->cpreg_vmstate_values,
                                        arraylen);
    cpu->cpreg_array_len = arraylen;
    cpu->cpreg_vmstate_array_len = arraylen;

    for (i = 0, arraylen = 0; i < rlp->n; i++) {
        uint64_t regidx = rlp->reg[i];
        if (!kvm_arm_reg_syncs_via_cpreg_list(regidx)) {
            continue;
        }
        cpu->cpreg_indexes[arraylen] = regidx;
        arraylen++;
    }
    assert(cpu->cpreg_array_len == arraylen);

    if (!write_kvmstate_to_list(cpu)) {
        /* Shouldn't happen unless kernel is inconsistent about
         * what registers exist.
         */
        fprintf(stderr, "Initial read of kernel register state failed\n");
        ret = -EINVAL;
        goto out;
    }

out:
    g_free(rlp);
    return ret;
}
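
/*
 * Background (from the KVM API documentation): each 64-bit register ID
 * encodes the register size in bits [55:52] as log2 of the size in bytes,
 * which is what the KVM_REG_SIZE_MASK checks above and below rely on:
 *
 *     KVM_REG_SIZE_U32 = 0x0020000000000000   (4-byte register)
 *     KVM_REG_SIZE_U64 = 0x0030000000000000   (8-byte register)
 */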

bool write_kvmstate_to_list(ARMCPU *cpu)
{
    CPUState *cs = CPU(cpu);
    int i;
    bool ok = true;

    for (i = 0; i < cpu->cpreg_array_len; i++) {
        uint64_t regidx = cpu->cpreg_indexes[i];
        uint32_t v32;
        int ret;

        switch (regidx & KVM_REG_SIZE_MASK) {
        case KVM_REG_SIZE_U32:
            ret = kvm_get_one_reg(cs, regidx, &v32);
            if (!ret) {
                cpu->cpreg_values[i] = v32;
            }
            break;
        case KVM_REG_SIZE_U64:
            ret = kvm_get_one_reg(cs, regidx, cpu->cpreg_values + i);
            break;
        default:
            g_assert_not_reached();
        }
        if (ret) {
            ok = false;
        }
    }
    return ok;
}

bool write_list_to_kvmstate(ARMCPU *cpu, int level)
{
    CPUState *cs = CPU(cpu);
    int i;
    bool ok = true;

    for (i = 0; i < cpu->cpreg_array_len; i++) {
        uint64_t regidx = cpu->cpreg_indexes[i];
        uint32_t v32;
        int ret;

        if (kvm_arm_cpreg_level(regidx) > level) {
            continue;
        }

        switch (regidx & KVM_REG_SIZE_MASK) {
        case KVM_REG_SIZE_U32:
            v32 = cpu->cpreg_values[i];
            ret = kvm_set_one_reg(cs, regidx, &v32);
            break;
        case KVM_REG_SIZE_U64:
            ret = kvm_set_one_reg(cs, regidx, cpu->cpreg_values + i);
            break;
        default:
            g_assert_not_reached();
        }
        if (ret) {
            /* We might fail for "unknown register" and also for
             * "you tried to set a register which is constant with
             * a different value from what it actually contains".
             */
            ok = false;
        }
    }
    return ok;
}

void kvm_arm_cpu_pre_save(ARMCPU *cpu)
{
    /* KVM virtual time adjustment */
    if (cpu->kvm_vtime_dirty) {
        *kvm_arm_get_cpreg_ptr(cpu, KVM_REG_ARM_TIMER_CNT) = cpu->kvm_vtime;
    }
}

void kvm_arm_cpu_post_load(ARMCPU *cpu)
{
    /* KVM virtual time adjustment */
    if (cpu->kvm_adjvtime) {
        cpu->kvm_vtime = *kvm_arm_get_cpreg_ptr(cpu, KVM_REG_ARM_TIMER_CNT);
        cpu->kvm_vtime_dirty = true;
    }
}
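
/*
 * Together, pre_save and post_load carry the virtual counter across
 * migration: pre_save copies the counter value captured while the VM was
 * paused into the cpreg list, and post_load marks it dirty so that
 * kvm_arm_put_virtual_time() writes it back to KVM when the VM resumes.
 * The net effect of kvm-adjvtime is that time spent stopped does not
 * advance the guest's virtual counter.
 */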

void kvm_arm_reset_vcpu(ARMCPU *cpu)
{
    int ret;

    /* Re-init VCPU so that all registers are set to
     * their respective reset values.
     */
    ret = kvm_arm_vcpu_init(CPU(cpu));
    if (ret < 0) {
        fprintf(stderr, "kvm_arm_vcpu_init failed: %s\n", strerror(-ret));
        abort();
    }
    if (!write_kvmstate_to_list(cpu)) {
        fprintf(stderr, "write_kvmstate_to_list failed\n");
        abort();
    }
    /*
     * Sync the reset values also into the CPUState. This is necessary
     * because the next thing we do will be a kvm_arch_put_registers()
     * which will update the list values from the CPUState before copying
     * the list values back to KVM. It's OK to ignore failure returns here
     * for the same reason we do so in kvm_arch_get_registers().
     */
    write_list_to_cpustate(cpu);
}

/*
 * Update KVM's MP_STATE based on what QEMU thinks it is
 */
int kvm_arm_sync_mpstate_to_kvm(ARMCPU *cpu)
{
    if (cap_has_mp_state) {
        struct kvm_mp_state mp_state = {
            .mp_state = (cpu->power_state == PSCI_OFF) ?
            KVM_MP_STATE_STOPPED : KVM_MP_STATE_RUNNABLE
        };
        int ret = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MP_STATE, &mp_state);
        if (ret) {
            fprintf(stderr, "%s: failed to set MP_STATE %d/%s\n",
                    __func__, ret, strerror(-ret));
            return -1;
        }
    }

    return 0;
}

/*
 * Sync the KVM MP_STATE into QEMU
 */
int kvm_arm_sync_mpstate_to_qemu(ARMCPU *cpu)
{
    if (cap_has_mp_state) {
        struct kvm_mp_state mp_state;
        int ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_MP_STATE, &mp_state);
        if (ret) {
            fprintf(stderr, "%s: failed to get MP_STATE %d/%s\n",
                    __func__, ret, strerror(-ret));
            abort();
        }
        cpu->power_state = (mp_state.mp_state == KVM_MP_STATE_STOPPED) ?
            PSCI_OFF : PSCI_ON;
    }

    return 0;
}

void kvm_arm_get_virtual_time(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    int ret;

    if (cpu->kvm_vtime_dirty) {
        return;
    }

    ret = kvm_get_one_reg(cs, KVM_REG_ARM_TIMER_CNT, &cpu->kvm_vtime);
    if (ret) {
        error_report("Failed to get KVM_REG_ARM_TIMER_CNT");
        abort();
    }

    cpu->kvm_vtime_dirty = true;
}

void kvm_arm_put_virtual_time(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    int ret;

    if (!cpu->kvm_vtime_dirty) {
        return;
    }

    ret = kvm_set_one_reg(cs, KVM_REG_ARM_TIMER_CNT, &cpu->kvm_vtime);
    if (ret) {
        error_report("Failed to set KVM_REG_ARM_TIMER_CNT");
        abort();
    }

    cpu->kvm_vtime_dirty = false;
}

int kvm_put_vcpu_events(ARMCPU *cpu)
{
    CPUARMState *env = &cpu->env;
    struct kvm_vcpu_events events;
    int ret;

    if (!kvm_has_vcpu_events()) {
        return 0;
    }

    memset(&events, 0, sizeof(events));
    events.exception.serror_pending = env->serror.pending;

    /* Inject SError to guest with specified syndrome if host kernel
     * supports it, otherwise inject SError without syndrome.
     */
    if (cap_has_inject_serror_esr) {
        events.exception.serror_has_esr = env->serror.has_esr;
        events.exception.serror_esr = env->serror.esr;
    }

    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_VCPU_EVENTS, &events);
    if (ret) {
        error_report("failed to put vcpu events");
    }

    return ret;
}

int kvm_get_vcpu_events(ARMCPU *cpu)
{
    CPUARMState *env = &cpu->env;
    struct kvm_vcpu_events events;
    int ret;

    if (!kvm_has_vcpu_events()) {
        return 0;
    }

    memset(&events, 0, sizeof(events));
    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_VCPU_EVENTS, &events);
    if (ret) {
        error_report("failed to get vcpu events");
        return ret;
    }

    env->serror.pending = events.exception.serror_pending;
    env->serror.has_esr = events.exception.serror_has_esr;
    env->serror.esr = events.exception.serror_esr;

    return 0;
}

void kvm_arch_pre_run(CPUState *cs, struct kvm_run *run)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    if (unlikely(env->ext_dabt_raised)) {
        /*
         * Verify that the external DABT has been properly injected;
         * otherwise we risk indefinitely re-running the faulting
         * instruction. This covers a very narrow case for kernels
         * 5.5..5.5.4, where the injected abort was misconfigured to be
         * an IMPLEMENTATION DEFINED exception (for 32-bit EL1).
         */
        if (!arm_feature(env, ARM_FEATURE_AARCH64) &&
            unlikely(!kvm_arm_verify_ext_dabt_pending(cs))) {

            error_report("Data abort exception with no valid ISS generated by "
                         "guest memory access. KVM unable to emulate faulting "
                         "instruction. Failed to inject an external data abort "
                         "into the guest.");
            abort();
        }
        /* Clear the status */
        env->ext_dabt_raised = 0;
    }
}

MemTxAttrs kvm_arch_post_run(CPUState *cs, struct kvm_run *run)
{
    ARMCPU *cpu;
    uint32_t switched_level;

    if (kvm_irqchip_in_kernel()) {
        /*
         * We only need to sync timer states with user-space interrupt
         * controllers, so return early and save cycles if we don't.
         */
        return MEMTXATTRS_UNSPECIFIED;
    }

    cpu = ARM_CPU(cs);

    /* Synchronize our shadowed in-kernel device irq lines with the kvm ones */
    if (run->s.regs.device_irq_level != cpu->device_irq_level) {
        switched_level = cpu->device_irq_level ^ run->s.regs.device_irq_level;

        qemu_mutex_lock_iothread();

        if (switched_level & KVM_ARM_DEV_EL1_VTIMER) {
            qemu_set_irq(cpu->gt_timer_outputs[GTIMER_VIRT],
                         !!(run->s.regs.device_irq_level &
                            KVM_ARM_DEV_EL1_VTIMER));
            switched_level &= ~KVM_ARM_DEV_EL1_VTIMER;
        }

        if (switched_level & KVM_ARM_DEV_EL1_PTIMER) {
            qemu_set_irq(cpu->gt_timer_outputs[GTIMER_PHYS],
                         !!(run->s.regs.device_irq_level &
                            KVM_ARM_DEV_EL1_PTIMER));
            switched_level &= ~KVM_ARM_DEV_EL1_PTIMER;
        }

        if (switched_level & KVM_ARM_DEV_PMU) {
            qemu_set_irq(cpu->pmu_interrupt,
                         !!(run->s.regs.device_irq_level & KVM_ARM_DEV_PMU));
            switched_level &= ~KVM_ARM_DEV_PMU;
        }

        if (switched_level) {
            qemu_log_mask(LOG_UNIMP, "%s: unhandled in-kernel device IRQ %x\n",
                          __func__, switched_level);
        }

        /* We also mark unknown levels as processed to not waste cycles */
        cpu->device_irq_level = run->s.regs.device_irq_level;
        qemu_mutex_unlock_iothread();
    }

    return MEMTXATTRS_UNSPECIFIED;
}

void kvm_arm_vm_state_change(void *opaque, bool running, RunState state)
{
    CPUState *cs = opaque;
    ARMCPU *cpu = ARM_CPU(cs);

    if (running) {
        if (cpu->kvm_adjvtime) {
            kvm_arm_put_virtual_time(cs);
        }
    } else {
        if (cpu->kvm_adjvtime) {
            kvm_arm_get_virtual_time(cs);
        }
    }
}
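
/*
 * Note: this handler is registered per-vCPU from kvm_arch_init_vcpu() via
 * qemu_add_vm_change_state_handler(), so with kvm-adjvtime enabled the
 * virtual counter is saved on every stop and restored on every start,
 * not just across migration.
 */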

/**
 * kvm_arm_handle_dabt_nisv:
 * @cs: CPUState
 * @esr_iss: ISS encoding (limited) for the exception from Data Abort
 *           ISV bit set to '0b0' -> no valid instruction syndrome
 * @fault_ipa: faulting address for the synchronous data abort
 *
 * Returns: 0 if the exception has been handled, < 0 otherwise
 */
static int kvm_arm_handle_dabt_nisv(CPUState *cs, uint64_t esr_iss,
                                    uint64_t fault_ipa)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    /*
     * Request KVM to inject the external data abort into the guest
     */
    if (cap_has_inject_ext_dabt) {
        struct kvm_vcpu_events events = { };
        /*
         * The external data abort event will be handled immediately by KVM
         * using the address fault that triggered the exit on the given VCPU.
         * Requesting injection of the external data abort does not rely
         * on any other VCPU state, so in this particular case the VCPU
         * synchronization can safely be skipped.
         */
        events.exception.ext_dabt_pending = 1;
        /* KVM_CAP_ARM_INJECT_EXT_DABT implies KVM_CAP_VCPU_EVENTS */
        if (!kvm_vcpu_ioctl(cs, KVM_SET_VCPU_EVENTS, &events)) {
            env->ext_dabt_raised = 1;
            return 0;
        }
    } else {
        error_report("Data abort exception triggered by guest memory access "
                     "at physical address: 0x"  TARGET_FMT_lx,
                     (target_ulong)fault_ipa);
        error_printf("KVM unable to emulate faulting instruction.\n");
    }
    return -1;
}

int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
{
    int ret = 0;

    switch (run->exit_reason) {
    case KVM_EXIT_DEBUG:
        if (kvm_arm_handle_debug(cs, &run->debug.arch)) {
            ret = EXCP_DEBUG;
        } /* otherwise return to guest */
        break;
    case KVM_EXIT_ARM_NISV:
        /* External DABT with no valid iss to decode */
        ret = kvm_arm_handle_dabt_nisv(cs, run->arm_nisv.esr_iss,
                                       run->arm_nisv.fault_ipa);
        break;
    default:
        qemu_log_mask(LOG_UNIMP, "%s: un-handled exit reason %d\n",
                      __func__, run->exit_reason);
        break;
    }
    return ret;
}

bool kvm_arch_stop_on_emulation_error(CPUState *cs)
{
    return true;
}

int kvm_arch_process_async_events(CPUState *cs)
{
    return 0;
}

void kvm_arch_update_guest_debug(CPUState *cs, struct kvm_guest_debug *dbg)
{
    if (kvm_sw_breakpoints_active(cs)) {
        dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP;
    }
    if (kvm_arm_hw_debug_active(cs)) {
        dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW;
        kvm_arm_copy_hw_debug_data(&dbg->arch);
    }
}

void kvm_arch_init_irq_routing(KVMState *s)
{
}

int kvm_arch_irqchip_create(KVMState *s)
{
    if (kvm_kernel_irqchip_split()) {
        error_report("-machine kernel_irqchip=split is not supported on ARM.");
        exit(1);
    }

    /* If we can create the VGIC using the newer device control API, we
     * let the device do this when it initializes itself, otherwise we
     * fall back to the old API.
     */
    return kvm_check_extension(s, KVM_CAP_DEVICE_CTRL);
}

int kvm_arm_vgic_probe(void)
{
    int val = 0;

    if (kvm_create_device(kvm_state,
                          KVM_DEV_TYPE_ARM_VGIC_V3, true) == 0) {
        val |= KVM_ARM_VGIC_V3;
    }
    if (kvm_create_device(kvm_state,
                          KVM_DEV_TYPE_ARM_VGIC_V2, true) == 0) {
        val |= KVM_ARM_VGIC_V2;
    }
    return val;
}

int kvm_arm_set_irq(int cpu, int irqtype, int irq, int level)
{
    int kvm_irq = (irqtype << KVM_ARM_IRQ_TYPE_SHIFT) | irq;
    int cpu_idx1 = cpu % 256;
    int cpu_idx2 = cpu / 256;

    kvm_irq |= (cpu_idx1 << KVM_ARM_IRQ_VCPU_SHIFT) |
               (cpu_idx2 << KVM_ARM_IRQ_VCPU2_SHIFT);

    return kvm_set_irq(kvm_state, kvm_irq, !!level);
}
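
/*
 * The kvm_irq value built above follows the field layout documented for
 * KVM_IRQ_LINE on Arm:
 *
 *     bits:  | 31 ... 28 | 27 ... 24 | 23 ... 16 | 15 ... 0 |
 *     field: | vcpu2_idx | irq_type  | vcpu_idx  |  irq_id  |
 *
 * which is why vCPU indexes above 255 require
 * KVM_CAP_ARM_IRQ_LINE_LAYOUT_2 (checked in kvm_arch_init() above).
 */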

int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route,
                             uint64_t address, uint32_t data, PCIDevice *dev)
{
    AddressSpace *as = pci_device_iommu_address_space(dev);
    hwaddr xlat, len, doorbell_gpa;
    MemoryRegionSection mrs;
    MemoryRegion *mr;

    if (as == &address_space_memory) {
        return 0;
    }

    /* MSI doorbell address is translated by an IOMMU */

    RCU_READ_LOCK_GUARD();

    mr = address_space_translate(as, address, &xlat, &len, true,
                                 MEMTXATTRS_UNSPECIFIED);

    if (!mr) {
        return 1;
    }

    mrs = memory_region_find(mr, xlat, 1);

    if (!mrs.mr) {
        return 1;
    }

    doorbell_gpa = mrs.offset_within_address_space;
    memory_region_unref(mrs.mr);

    route->u.msi.address_lo = doorbell_gpa;
    route->u.msi.address_hi = doorbell_gpa >> 32;

    trace_kvm_arm_fixup_msi_route(address, doorbell_gpa);

    return 0;
}

int kvm_arch_add_msi_route_post(struct kvm_irq_routing_entry *route,
                                int vector, PCIDevice *dev)
{
    return 0;
}

int kvm_arch_release_virq_post(int virq)
{
    return 0;
}

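/*
 * GIC SPI interrupt IDs start at 32, while the GSI numbering used for
 * irqchip routing is SPI-relative, so the MSI data value is translated
 * by subtracting 32.
 */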
int kvm_arch_msi_data_to_gsi(uint32_t data)
{
    return (data - 32) & 0xffff;
}

bool kvm_arch_cpu_check_are_resettable(void)
{
    return true;
}

static void kvm_arch_get_eager_split_size(Object *obj, Visitor *v,
                                          const char *name, void *opaque,
                                          Error **errp)
{
    KVMState *s = KVM_STATE(obj);
    uint64_t value = s->kvm_eager_split_size;

    visit_type_size(v, name, &value, errp);
}

static void kvm_arch_set_eager_split_size(Object *obj, Visitor *v,
                                          const char *name, void *opaque,
                                          Error **errp)
{
    KVMState *s = KVM_STATE(obj);
    uint64_t value;

    if (s->fd != -1) {
        error_setg(errp, "Unable to set eager-split-size after KVM has been initialized");
        return;
    }

    if (!visit_type_size(v, name, &value, errp)) {
        return;
    }

    if (value && !is_power_of_2(value)) {
        error_setg(errp, "eager-split-size must be a power of two");
        return;
    }

    s->kvm_eager_split_size = value;
}

void kvm_arch_accel_class_init(ObjectClass *oc)
{
    object_class_property_add(oc, "eager-split-size", "size",
                              kvm_arch_get_eager_split_size,
                              kvm_arch_set_eager_split_size, NULL, NULL);

    object_class_property_set_description(oc, "eager-split-size",
        "Eager Page Split chunk size for hugepages. (default: 0, disabled)");
}
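
/*
 * Usage sketch: the property is set on the accelerator, e.g.
 * "-accel kvm,eager-split-size=64K"; the value must be one of the block
 * sizes advertised by KVM_CAP_ARM_SUPPORTED_BLOCK_SIZES (see
 * kvm_arch_init() above), and 0 (the default) leaves the feature disabled.
 */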