 * Copyright Christoffer Dall 2009-2010
 * Copyright Mian-M. Hamayun 2013, Virtual Open Systems
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.

#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "cpu.h"
#include "exec/address-spaces.h"
 * ARMHostCPUFeatures: information about the host CPU (identified

 * @cpu: ARMCPU
 * KVM_ARM_VCPU_INIT ioctl with the CPU type and feature

static int kvm_arm_vcpu_init(ARMCPU *cpu)
    init.target = cpu->kvm_target;
    memcpy(init.features, cpu->kvm_init_features, sizeof(init.features));
    return kvm_vcpu_ioctl(CPU(cpu), KVM_ARM_VCPU_INIT, &init);
 * @cpu: ARMCPU

static int kvm_arm_vcpu_finalize(ARMCPU *cpu, int feature)
    return kvm_vcpu_ioctl(CPU(cpu), KVM_ARM_VCPU_FINALIZE, &feature);

    int ret = 0, kvmfd = -1, vmfd = -1, cpufd = -1;
    } while (vmfd == -1 && errno == EINTR);
    if (init->target == -1) {
        init->target = preferred.target;
     * creating one kind of guest CPU which is its preferred
     * CPU type.
        memcpy(try.features, init->features, sizeof(init->features));
            init->target = try.target;
    fdarray[2] = cpufd;
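/*
 * Hedged sketch (not part of the original file): stripped of the EINTR
 * retry loop and error unwinding above, the scratch-VCPU setup reduces to
 * this raw ioctl sequence; "scratch_probe" is a hypothetical name, and
 * <fcntl.h>, <sys/ioctl.h> and <linux/kvm.h> are assumed.
 */
static int scratch_probe(int fdarray[3], struct kvm_vcpu_init *init)
{
    struct kvm_vcpu_init preferred;

    fdarray[0] = open("/dev/kvm", O_RDWR);               /* kvmfd */
    fdarray[1] = ioctl(fdarray[0], KVM_CREATE_VM, 0);    /* vmfd */
    if (init->target == -1 &&
        ioctl(fdarray[1], KVM_ARM_PREFERRED_TARGET, &preferred) == 0) {
        init->target = preferred.target; /* let the kernel pick the CPU type */
    }
    fdarray[2] = ioctl(fdarray[1], KVM_CREATE_VCPU, 0);  /* cpufd */
    return ioctl(fdarray[2], KVM_ARM_VCPU_INIT, init);   /* bring the VCPU up */
}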
    for (i = 2; i >= 0; i--) {

        return -1;
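/*
 * Hedged sketch: only the "return -1" of read_sys_reg32() survives above;
 * the body presumably wraps KVM_GET_ONE_REG, which writes the 64-bit
 * register value to the user buffer named by kvm_one_reg.addr, narrowed to
 * 32 bits here. Treat this as illustrative rather than the file's exact code.
 */
static int read_sys_reg32_sketch(int cpufd, uint32_t *pret, uint64_t id)
{
    uint64_t val;
    struct kvm_one_reg idreg = { .id = id, .addr = (uintptr_t)&val };

    if (ioctl(cpufd, KVM_GET_ONE_REG, &idreg) < 0) {
        return -1;
    }
    *pret = val;   /* AArch64 ID registers are 64-bit; callers want 32 */
    return 0;
}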
    /* Identify the feature bits corresponding to the host CPU, and
     * we have to create a scratch VM, create a single CPU inside it,
     * and then query that CPU for the relevant ID registers.
     * we know these will only support creating one kind of guest CPU,
     * which is its preferred CPU type. Fortunately these old kernels
     * target = -1 informs kvm_arm_create_scratch_host_vcpu()
    struct kvm_vcpu_init init = { .target = -1, };

    ahcf->target = init.target;
    ahcf->dtb_compatible = "arm,arm-v8";

    err = read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64pfr0,
     * run the tiniest of hand-crafted kernels to extract
        ahcf->isar.id_aa64pfr0 = 0x00000011; /* EL1&0, AArch64 only */
    err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64pfr1,
    err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64smfr0,
    err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64dfr0,
    err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64dfr1,
    err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64isar0,
    err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64isar1,
    err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64isar2,
                          ARM64_SYS_REG(3, 0, 0, 6, 2));
    err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64mmfr0,
    err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64mmfr1,
    err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64mmfr2,
                          ARM64_SYS_REG(3, 0, 0, 7, 2));
    err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64mmfr3,
    err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_pfr0,
    err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_pfr1,
    err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_dfr0,
                          ARM64_SYS_REG(3, 0, 0, 1, 2));
    err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr0,
    err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr1,
    err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr2,
    err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr3,
    err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar0,
                          ARM64_SYS_REG(3, 0, 0, 2, 0));
    err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar1,
                          ARM64_SYS_REG(3, 0, 0, 2, 1));
    err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar2,
                          ARM64_SYS_REG(3, 0, 0, 2, 2));
    err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar3,
                          ARM64_SYS_REG(3, 0, 0, 2, 3));
    err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar4,
                          ARM64_SYS_REG(3, 0, 0, 2, 4));
    err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar5,
                          ARM64_SYS_REG(3, 0, 0, 2, 5));
    err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr4,
                          ARM64_SYS_REG(3, 0, 0, 2, 6));
    err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar6,
                          ARM64_SYS_REG(3, 0, 0, 2, 7));

    err |= read_sys_reg32(fdarray[2], &ahcf->isar.mvfr0,
    err |= read_sys_reg32(fdarray[2], &ahcf->isar.mvfr1,
    err |= read_sys_reg32(fdarray[2], &ahcf->isar.mvfr2,
                          ARM64_SYS_REG(3, 0, 0, 3, 2));
    err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_pfr2,
    err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_dfr1,
    err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr5,
     * provide an accessor for it in 64-bit mode, which is what this
     * scratch VM is in, and there's no architected "64-bit sysreg
     * which reads the same as the 32-bit register" the way there is
     * We only do this if the CPU supports AArch32 at EL1.
    if (FIELD_EX32(ahcf->isar.id_aa64pfr0, ID_AA64PFR0, EL1) >= 2) {
        int wrps = FIELD_EX64(ahcf->isar.id_aa64dfr0, ID_AA64DFR0, WRPS);
        int brps = FIELD_EX64(ahcf->isar.id_aa64dfr0, ID_AA64DFR0, BRPS);
            FIELD_EX64(ahcf->isar.id_aa64dfr0, ID_AA64DFR0, CTX_CMPS);
            !!FIELD_EX32(ahcf->isar.id_aa64pfr0, ID_AA64PFR0, EL3);
        ahcf->isar.dbgdidr = dbgdidr;
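        /*
         * Hedged sketch (not from the original file): the elided lines just
         * above presumably pack the extracted debug counts into the AArch32
         * DBGDIDR layout with FIELD_DP32(); "ctx_cmps" and "has_el3" stand
         * in for the variables whose initialisers survive in the listing.
         */
        uint32_t dbgdidr = 0;
        dbgdidr = FIELD_DP32(dbgdidr, DBGDIDR, WRPS, wrps);
        dbgdidr = FIELD_DP32(dbgdidr, DBGDIDR, BRPS, brps);
        dbgdidr = FIELD_DP32(dbgdidr, DBGDIDR, CTX_CMPS, ctx_cmps);
        dbgdidr = FIELD_DP32(dbgdidr, DBGDIDR, VERSION, 6); /* Armv8 debug */
        dbgdidr = FIELD_DP32(dbgdidr, DBGDIDR, SE_IMP, has_el3);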
    err |= read_sys_reg64(fdarray[2], &ahcf->isar.reset_pmcr_el0,
        err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64zfr0,
     * We can assume any KVM supporting CPU is at least a v8
    ahcf->features = features;

void kvm_arm_set_cpu_features_from_host(ARMCPU *cpu)
    CPUARMState *env = &cpu->env;
        cpu->kvm_target = QEMU_KVM_ARM_TARGET_NONE;
        cpu->host_cpu_probe_failed = true;
    cpu->kvm_target = arm_host_cpu_features.target;
    cpu->dtb_compatible = arm_host_cpu_features.dtb_compatible;
    cpu->isar = arm_host_cpu_features.isar;
    env->features = arm_host_cpu_features.features;

    return !ARM_CPU(obj)->kvm_adjvtime;
    ARM_CPU(obj)->kvm_adjvtime = !value;
    return ARM_CPU(obj)->kvm_steal_time != ON_OFF_AUTO_OFF;
    ARM_CPU(obj)->kvm_steal_time = value ? ON_OFF_AUTO_ON : ON_OFF_AUTO_OFF;
/* KVM VCPU properties should be prefixed with "kvm-". */

void kvm_arm_add_vcpu_properties(ARMCPU *cpu)
    CPUARMState *env = &cpu->env;
    Object *obj = OBJECT(cpu);

        cpu->kvm_adjvtime = true;
        object_property_add_bool(obj, "kvm-no-adjvtime", kvm_no_adjvtime_get,
        object_property_set_description(obj, "kvm-no-adjvtime",
        cpu->kvm_steal_time = ON_OFF_AUTO_AUTO;
        object_property_add_bool(obj, "kvm-steal-time", kvm_steal_time_get,
        object_property_set_description(obj, "kvm-steal-time",
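/*
 * Usage note: the QOM properties registered above surface as -cpu options,
 * e.g.:
 *
 *   qemu-system-aarch64 -accel kvm -cpu host,kvm-no-adjvtime=on
 *   qemu-system-aarch64 -accel kvm -cpu host,kvm-steal-time=off
 *
 * With the default kvm-no-adjvtime=off the virtual counter is saved and
 * restored around VM stop/start, so paused time is hidden from the guest;
 * kvm-steal-time defaults to auto.
 */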
    KVMState *s = KVM_STATE(ms->accelerator);
     * whether we are using an in-kernel VGIC or not.
    if (ms->smp.cpus > 256 &&
        ret = -EINVAL;
    if (s->kvm_eager_split_size) {
            s->kvm_eager_split_size = 0;
        } else if (!(s->kvm_eager_split_size & sizes)) {
            ret = -EINVAL;
                         s->kvm_eager_split_size);
                     strerror(-ret));

unsigned long kvm_arch_vcpu_id(CPUState *cpu)
    return cpu->cpu_index;
        if (section->mr == kd->mr) {
            kd->kda.addr = section->offset_within_address_space;
        if (section->mr == kd->mr) {
            kd->kda.addr = -1;

    .name = "kvm-arm",

    struct kvm_device_attr *attr = &kd->kdattr;
    uint64_t addr = kd->kda.addr;
    addr |= kd->kda_addr_ormask;
    attr->addr = (uintptr_t)&addr;
    ret = kvm_device_ioctl(kd->dev_fd, KVM_SET_DEVICE_ATTR, attr);
                     strerror(-ret));

        if (kd->kda.addr != -1) {
        memory_region_unref(kd->mr);

    kd->mr = mr;
    kd->kda.id = devid;
    kd->kda.addr = -1;
    kd->kdattr.flags = 0;
    kd->kdattr.group = group;
    kd->kdattr.attr = attr;
    kd->dev_fd = dev_fd;
    kd->kda_addr_ormask = addr_ormask;
    memory_region_ref(kd->mr);
        return -1;

static uint64_t *kvm_arm_get_cpreg_ptr(ARMCPU *cpu, uint64_t regidx)
    res = bsearch(&regidx, cpu->cpreg_indexes, cpu->cpreg_array_len,
    return &cpu->cpreg_values[res - cpu->cpreg_indexes];
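/*
 * Hedged sketch: compare_u64() (only its "return -1" is visible above) must
 * give qsort()/bsearch() a total order over uint64_t keys; returning a - b
 * would truncate when narrowed to int, hence the explicit comparisons:
 */
static int compare_u64_sketch(const void *a, const void *b)
{
    if (*(const uint64_t *)a > *(const uint64_t *)b) {
        return 1;
    }
    if (*(const uint64_t *)a < *(const uint64_t *)b) {
        return -1;
    }
    return 0;
}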
 * @cpu: ARMCPU
 * definition of what CPU registers it knows about (and throw away
 * the previous TCG-created cpreg list).

static int kvm_arm_init_cpreg_list(ARMCPU *cpu)
    CPUState *cs = CPU(cpu);
    if (ret != -E2BIG) {
    rlp->n = rl.n;
    qsort(&rlp->reg, rlp->n, sizeof(rlp->reg[0]), compare_u64);
    for (i = 0, arraylen = 0; i < rlp->n; i++) {
        if (!kvm_arm_reg_syncs_via_cpreg_list(rlp->reg[i])) {
        switch (rlp->reg[i] & KVM_REG_SIZE_MASK) {
            ret = -EINVAL;
    cpu->cpreg_indexes = g_renew(uint64_t, cpu->cpreg_indexes, arraylen);
    cpu->cpreg_values = g_renew(uint64_t, cpu->cpreg_values, arraylen);
    cpu->cpreg_vmstate_indexes = g_renew(uint64_t, cpu->cpreg_vmstate_indexes,
    cpu->cpreg_vmstate_values = g_renew(uint64_t, cpu->cpreg_vmstate_values,
    cpu->cpreg_array_len = arraylen;
    cpu->cpreg_vmstate_array_len = arraylen;
    for (i = 0, arraylen = 0; i < rlp->n; i++) {
        uint64_t regidx = rlp->reg[i];
        cpu->cpreg_indexes[arraylen] = regidx;
    assert(cpu->cpreg_array_len == arraylen);
    if (!write_kvmstate_to_list(cpu)) {
        ret = -EINVAL;
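/*
 * Hedged sketch of the KVM_GET_REG_LIST handshake behind the fragments
 * above: the first call passes n == 0, so the kernel fails with -E2BIG
 * while reporting the real count, and a right-sized list is then refilled.
 */
struct kvm_reg_list rl = { .n = 0 };
struct kvm_reg_list *rlp;

ret = kvm_vcpu_ioctl(cs, KVM_GET_REG_LIST, &rl);
if (ret != -E2BIG) {
    return ret;          /* anything but E2BIG is a genuine failure */
}
rlp = g_malloc(sizeof(struct kvm_reg_list) + rl.n * sizeof(rl.reg[0]));
rlp->n = rl.n;
ret = kvm_vcpu_ioctl(cs, KVM_GET_REG_LIST, rlp);  /* now fills rlp->reg[] */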
bool write_kvmstate_to_list(ARMCPU *cpu)
    CPUState *cs = CPU(cpu);
    for (i = 0; i < cpu->cpreg_array_len; i++) {
        uint64_t regidx = cpu->cpreg_indexes[i];
            cpu->cpreg_values[i] = v32;
            ret = kvm_get_one_reg(cs, regidx, cpu->cpreg_values + i);

bool write_list_to_kvmstate(ARMCPU *cpu, int level)
    CPUState *cs = CPU(cpu);
    for (i = 0; i < cpu->cpreg_array_len; i++) {
        uint64_t regidx = cpu->cpreg_indexes[i];
            v32 = cpu->cpreg_values[i];
            ret = kvm_set_one_reg(cs, regidx, cpu->cpreg_values + i);

void kvm_arm_cpu_pre_save(ARMCPU *cpu)
    if (cpu->kvm_vtime_dirty) {
        *kvm_arm_get_cpreg_ptr(cpu, KVM_REG_ARM_TIMER_CNT) = cpu->kvm_vtime;

void kvm_arm_cpu_post_load(ARMCPU *cpu)
    if (cpu->kvm_adjvtime) {
        cpu->kvm_vtime = *kvm_arm_get_cpreg_ptr(cpu, KVM_REG_ARM_TIMER_CNT);
        cpu->kvm_vtime_dirty = true;
void kvm_arm_reset_vcpu(ARMCPU *cpu)
    /* Re-init VCPU so that all registers are set to
    ret = kvm_arm_vcpu_init(cpu);
        fprintf(stderr, "kvm_arm_vcpu_init failed: %s\n", strerror(-ret));
    if (!write_kvmstate_to_list(cpu)) {
    write_list_to_cpustate(cpu);

static int kvm_arm_sync_mpstate_to_kvm(ARMCPU *cpu)
        .mp_state = (cpu->power_state == PSCI_OFF) ?
    return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MP_STATE, &mp_state);

static int kvm_arm_sync_mpstate_to_qemu(ARMCPU *cpu)
    int ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_MP_STATE, &mp_state);
        cpu->power_state = (mp_state.mp_state == KVM_MP_STATE_STOPPED) ?
 * @cpu: ARMCPU
 * Gets the VCPU's virtual counter and stores it in the KVM CPU state.

static void kvm_arm_get_virtual_time(ARMCPU *cpu)
    if (cpu->kvm_vtime_dirty) {
    ret = kvm_get_one_reg(CPU(cpu), KVM_REG_ARM_TIMER_CNT, &cpu->kvm_vtime);
    cpu->kvm_vtime_dirty = true;

 * @cpu: ARMCPU
 * Sets the VCPU's virtual counter to the value stored in the KVM CPU state.

static void kvm_arm_put_virtual_time(ARMCPU *cpu)
    if (!cpu->kvm_vtime_dirty) {
    ret = kvm_set_one_reg(CPU(cpu), KVM_REG_ARM_TIMER_CNT, &cpu->kvm_vtime);
    cpu->kvm_vtime_dirty = false;

 * @cpu: ARMCPU

static int kvm_put_vcpu_events(ARMCPU *cpu)
    CPUARMState *env = &cpu->env;
    events.exception.serror_pending = env->serror.pending;
        events.exception.serror_has_esr = env->serror.has_esr;
        events.exception.serror_esr = env->serror.esr;
    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_VCPU_EVENTS, &events);

 * @cpu: ARMCPU

static int kvm_get_vcpu_events(ARMCPU *cpu)
    CPUARMState *env = &cpu->env;
    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_VCPU_EVENTS, &events);
    env->serror.pending = events.exception.serror_pending;
    env->serror.has_esr = events.exception.serror_has_esr;
    env->serror.esr = events.exception.serror_esr;
#define ARM64_REG_ESR_EL1 ARM64_SYS_REG(3, 0, 5, 2, 0)
#define ARM64_REG_TCR_EL1 ARM64_SYS_REG(3, 0, 2, 0, 2)

 *  FS[4]   - DFSR[10]
 *  FS[3:0] - DFSR[3:0]
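/*
 * Worked example of the short-descriptor mapping above: the 5-bit fault
 * status FS is split across the DFSR, so it is reassembled as
 *
 *   fs = ((dfsr >> 10) & 1) << 4 | (dfsr & 0xf);
 *
 * i.e. bit 10 supplies FS[4] and bits [3:0] supply FS[3:0].
 */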
 * @cpu: ARMCPU

static bool kvm_arm_verify_ext_dabt_pending(ARMCPU *cpu)
    CPUState *cs = CPU(cpu);
    CPUARMState *env = &cpu->env;

    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    if (unlikely(env->ext_dabt_raised)) {
         * otherwise risking indefinitely re-running the faulting instruction
         * an IMPLEMENTATION DEFINED exception (for 32-bit EL1)
            unlikely(!kvm_arm_verify_ext_dabt_pending(cpu))) {
        env->ext_dabt_raised = 0;
    ARMCPU *cpu;
     * We only need to sync timer states with user-space interrupt
    cpu = ARM_CPU(cs);

    /* Synchronize our shadowed in-kernel device irq lines with the kvm ones */
    if (run->s.regs.device_irq_level != cpu->device_irq_level) {
        switched_level = cpu->device_irq_level ^ run->s.regs.device_irq_level;
            qemu_set_irq(cpu->gt_timer_outputs[GTIMER_VIRT],
                         !!(run->s.regs.device_irq_level &
            qemu_set_irq(cpu->gt_timer_outputs[GTIMER_PHYS],
                         !!(run->s.regs.device_irq_level &
            qemu_set_irq(cpu->pmu_interrupt,
                         !!(run->s.regs.device_irq_level & KVM_ARM_DEV_PMU));
            qemu_log_mask(LOG_UNIMP, "%s: unhandled in-kernel device IRQ %x\n",
        cpu->device_irq_level = run->s.regs.device_irq_level;
    ARMCPU *cpu = opaque;
        if (cpu->kvm_adjvtime) {
            kvm_arm_put_virtual_time(cpu);
        if (cpu->kvm_adjvtime) {
            kvm_arm_get_virtual_time(cpu);

 * @cpu: ARMCPU
 * ISV bit set to '0b0' -> no valid instruction syndrome

static int kvm_arm_handle_dabt_nisv(ARMCPU *cpu, uint64_t esr_iss,
    CPUARMState *env = &cpu->env;
        if (!kvm_vcpu_ioctl(CPU(cpu), KVM_SET_VCPU_EVENTS, &events)) {
            env->ext_dabt_raised = 1;
    return -1;
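/*
 * Hedged sketch of the elided injection step: with no usable syndrome to
 * decode, QEMU asks the kernel to inject an external data abort by setting
 * ext_dabt_pending in struct kvm_vcpu_events (KVM_CAP_ARM_INJECT_EXT_DABT)
 * before the KVM_SET_VCPU_EVENTS call shown above:
 */
struct kvm_vcpu_events events = { };

events.exception.ext_dabt_pending = 1;
/* then: kvm_vcpu_ioctl(CPU(cpu), KVM_SET_VCPU_EVENTS, &events) */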
 * @cpu: ARMCPU
 * To minimise translating between kernel and user-space the kernel
 * ABI just provides user-space with the full exception syndrome

static bool kvm_arm_handle_debug(ARMCPU *cpu,
    int hsr_ec = syn_get_ec(debug_exit->hsr);
    CPUState *cs = CPU(cpu);
    CPUARMState *env = &cpu->env;
        if (cs->singlestep_enabled) {
            error_report("%s: guest single-step while debugging unsupported"
                         __func__, env->pc, debug_exit->hsr);
        if (kvm_find_sw_breakpoint(cs, env->pc)) {
        if (find_hw_breakpoint(cs, env->pc)) {
        CPUWatchpoint *wp = find_hw_watchpoint(cs, debug_exit->far);
            cs->watchpoint_hit = wp;
                 __func__, debug_exit->hsr, env->pc);
     * the guest. Let's re-use the existing TCG interrupt code to set
    cs->exception_index = EXCP_BKPT;
    env->exception.syndrome = debug_exit->hsr;
    env->exception.vaddress = debug_exit->far;
    env->exception.target_el = 1;
    ARMCPU *cpu = ARM_CPU(cs);
    switch (run->exit_reason) {
        if (kvm_arm_handle_debug(cpu, &run->debug.arch)) {
        ret = kvm_arm_handle_dabt_nisv(cpu, run->arm_nisv.esr_iss,
                                       run->arm_nisv.fault_ipa);
        qemu_log_mask(LOG_UNIMP, "%s: un-handled exit reason %d\n",
                      __func__, run->exit_reason);

 * @cpu: ARMCPU

static bool kvm_arm_hw_debug_active(ARMCPU *cpu)

        ptr->dbg_wcr[i] = wp->wcr;
        ptr->dbg_wvr[i] = wp->wvr;
        ptr->dbg_bcr[i] = bp->bcr;
        ptr->dbg_bvr[i] = bp->bvr;

    dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP;
        dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW;
        kvm_arm_copy_hw_debug_data(&dbg->arch);
    error_report("-machine kernel_irqchip=split is not supported on ARM.");

int kvm_arm_set_irq(int cpu, int irqtype, int irq, int level)
    int cpu_idx1 = cpu % 256;
    int cpu_idx2 = cpu / 256;
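/*
 * Hedged sketch: the modulo/division above exists because KVM_IRQ_LINE
 * packs the vcpu index into two fields of its 32-bit "irq" word; per the
 * arm64 uapi headers the composition is along these lines:
 */
kvm_irq = (irqtype << KVM_ARM_IRQ_TYPE_SHIFT) |
          (cpu_idx2 << KVM_ARM_IRQ_VCPU2_SHIFT) |
          (cpu_idx1 << KVM_ARM_IRQ_VCPU_SHIFT) | irq;
return kvm_set_irq(kvm_state, kvm_irq, !!level);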
        route->u.msi.address_lo = doorbell_gpa;
        route->u.msi.address_hi = doorbell_gpa >> 32;

    return (data - 32) & 0xffff;

    uint64_t value = s->kvm_eager_split_size;

    if (s->fd != -1) {
        error_setg(errp, "Unable to set early-split-size after KVM has been initialized");
        error_setg(errp, "early-split-size must be a power of two");
    s->kvm_eager_split_size = value;

    object_class_property_add(oc, "eager-split-size", "size",
    object_class_property_set_description(oc, "eager-split-size",

    return -ENOSYS;

    return -ENOSYS;
static bool kvm_arm_set_device_attr(ARMCPU *cpu, struct kvm_device_attr *attr,
    err = kvm_vcpu_ioctl(CPU(cpu), KVM_HAS_DEVICE_ATTR, attr);
        error_report("%s: KVM_HAS_DEVICE_ATTR: %s", name, strerror(-err));
    err = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_DEVICE_ATTR, attr);
        error_report("%s: KVM_SET_DEVICE_ATTR: %s", name, strerror(-err));

void kvm_arm_pmu_init(ARMCPU *cpu)
    if (!cpu->has_pmu) {
    if (!kvm_arm_set_device_attr(cpu, &attr, "PMU")) {

void kvm_arm_pmu_set_irq(ARMCPU *cpu, int irq)
    if (!cpu->has_pmu) {
    if (!kvm_arm_set_device_attr(cpu, &attr, "PMU")) {

void kvm_arm_pvtime_init(ARMCPU *cpu, uint64_t ipa)
    if (cpu->kvm_steal_time == ON_OFF_AUTO_OFF) {
    if (!kvm_arm_set_device_attr(cpu, &attr, "PVTIME IPA")) {

void kvm_arm_steal_time_finalize(ARMCPU *cpu, Error **errp)
    if (cpu->kvm_steal_time == ON_OFF_AUTO_AUTO) {
        if (!has_steal_time || !arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
            cpu->kvm_steal_time = ON_OFF_AUTO_OFF;
            cpu->kvm_steal_time = ON_OFF_AUTO_ON;
    } else if (cpu->kvm_steal_time == ON_OFF_AUTO_ON) {
            error_setg(errp, "'kvm-steal-time' cannot be enabled "
        } else if (!arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
             * DEN0057A chapter 2 says "This specification only covers
            error_setg(errp, "'kvm-steal-time' cannot be enabled "
uint32_t kvm_arm_sve_get_vls(ARMCPU *cpu)
        .target = -1,
    ret = ioctl(fdarray[2], KVM_GET_ONE_REG, &reg);
    for (i = KVM_ARM64_SVE_VLS_WORDS - 1; i >= 0; --i) {
            vq = 64 - clz64(vls[i]) + i * 64;
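/*
 * Worked example of the formula above: bit j of vls[i] set means vector
 * length (i * 64 + j + 1) quadwords is supported. With vls[0] == 0x3,
 * VQ 1 and VQ 2 (128- and 256-bit vectors) are valid, and the maximum is
 * vq = 64 - clz64(0x3) + 0 * 64 = 64 - 62 = 2.
 */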
static int kvm_arm_sve_set_vls(ARMCPU *cpu)
    uint64_t vls[KVM_ARM64_SVE_VLS_WORDS] = { cpu->sve_vq.map };
    assert(cpu->sve_max_vq <= KVM_ARM64_SVE_VQ_MAX);
    return kvm_set_one_reg(CPU(cpu), KVM_REG_ARM64_SVE_VLS, &vls[0]);
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    if (cpu->kvm_target == QEMU_KVM_ARM_TARGET_NONE ||
        !object_dynamic_cast(OBJECT(cpu), TYPE_AARCH64_CPU)) {
        error_report("KVM is not supported for this guest CPU type");
        return -EINVAL;
    qemu_add_vm_change_state_handler(kvm_arm_vm_state_change, cpu);

    /* Determine init features for this CPU */
    memset(cpu->kvm_init_features, 0, sizeof(cpu->kvm_init_features));
    if (cs->start_powered_off) {
        cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_POWER_OFF;
    if (kvm_check_extension(cs->kvm_state, KVM_CAP_ARM_PSCI_0_2)) {
        cpu->psci_version = QEMU_PSCI_VERSION_0_2;
        cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_PSCI_0_2;
        cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_EL1_32BIT;
    if (cpu->has_pmu) {
        cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_PMU_V3;
    if (cpu_isar_feature(aa64_sve, cpu)) {
        cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_SVE;
    if (cpu_isar_feature(aa64_pauth, cpu)) {
        cpu->kvm_init_features[0] |= (1 << KVM_ARM_VCPU_PTRAUTH_ADDRESS |
    ret = kvm_arm_vcpu_init(cpu);
    if (cpu_isar_feature(aa64_sve, cpu)) {
        ret = kvm_arm_sve_set_vls(cpu);
        ret = kvm_arm_vcpu_finalize(cpu, KVM_ARM_VCPU_SVE);
     * in the same 15-bits major 16-bits minor format that PSCI_VERSION
        cpu->psci_version = psciver;
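/*
 * Worked example of the encoding above: PSCI versions pack the major
 * revision in bits [31:16] (bit 31 reserved, hence "15-bits major") and
 * the minor revision in bits [15:0], so PSCI 1.1 reads back as
 * psciver == 0x00010001.
 */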
     * When KVM is in use, PSCI is emulated in-kernel and not by qemu.
    cpu->mp_affinity = mpidr & ARM64_AFFINITY_MASK;

    return kvm_arm_init_cpreg_list(cpu);

    ARMCPU *cpu = ARM_CPU(c);
    CPUARMState *env = &cpu->env;
    c->exception_index = EXCP_DATA_ABORT;
    env->exception.target_el = 1;
    same_el = arm_current_el(env) == env->exception.target_el;
    env->exception.syndrome = esr;

    CPUARMState *env = &ARM_CPU(cs)->env;
        uint64_t fp_val[2] = { q[1], q[0] };

    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    uint64_t tmp[ARM_MAX_VQ * 2];
        r = sve_bswap64(tmp, &env->vfp.zregs[n].d[0], cpu->sve_max_vq * 2);
        r = sve_bswap64(tmp, r = &env->vfp.pregs[n].p[0],
                        DIV_ROUND_UP(cpu->sve_max_vq * 2, 8));
    r = sve_bswap64(tmp, &env->vfp.pregs[FFR_PRED_NUM].p[0],
                    DIV_ROUND_UP(cpu->sve_max_vq * 2, 8));
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
     * AArch64 registers before pushing them out to 64-bit KVM.
                              &env->xregs[i]);
    ret = kvm_set_one_reg(cs, AARCH64_CORE_REG(regs.sp), &env->sp_el[0]);
    ret = kvm_set_one_reg(cs, AARCH64_CORE_REG(sp_el1), &env->sp_el[1]);
    ret = kvm_set_one_reg(cs, AARCH64_CORE_REG(regs.pc), &env->pc);
    ret = kvm_set_one_reg(cs, AARCH64_CORE_REG(elr_el1), &env->elr_el[1]);
     * ensure that any modifications to env->spsr are correctly
        i = bank_number(env->uncached_cpsr & CPSR_M);
        env->banked_spsr[i] = env->spsr;
    /* KVM 0-4 map to QEMU banks 1-5 */
                              &env->banked_spsr[i + 1]);
    if (cpu_isar_feature(aa64_sve, cpu)) {
    write_cpustate_to_list(cpu, true);
    if (!write_list_to_kvmstate(cpu, level)) {
        return -EINVAL;
    ret = kvm_put_vcpu_events(cpu);
    return kvm_arm_sync_mpstate_to_kvm(cpu);
    CPUARMState *env = &ARM_CPU(cs)->env;

    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
        r = &env->vfp.zregs[n].d[0];
        sve_bswap64(r, r, cpu->sve_max_vq * 2);
        r = &env->vfp.pregs[n].p[0];
        sve_bswap64(r, r, DIV_ROUND_UP(cpu->sve_max_vq * 2, 8));
    r = &env->vfp.pregs[FFR_PRED_NUM].p[0];
    sve_bswap64(r, r, DIV_ROUND_UP(cpu->sve_max_vq * 2, 8));
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
                              &env->xregs[i]);
    ret = kvm_get_one_reg(cs, AARCH64_CORE_REG(regs.sp), &env->sp_el[0]);
    ret = kvm_get_one_reg(cs, AARCH64_CORE_REG(sp_el1), &env->sp_el[1]);
    env->aarch64 = ((val & PSTATE_nRW) == 0);
    ret = kvm_get_one_reg(cs, AARCH64_CORE_REG(regs.pc), &env->pc);
     * incoming AArch64 regs received from 64-bit KVM.
    ret = kvm_get_one_reg(cs, AARCH64_CORE_REG(elr_el1), &env->elr_el[1]);
     * KVM SPSRs 0-4 map to QEMU banks 1-5
                              &env->banked_spsr[i + 1]);
        i = bank_number(env->uncached_cpsr & CPSR_M);
        env->spsr = env->banked_spsr[i];
    if (cpu_isar_feature(aa64_sve, cpu)) {
    ret = kvm_get_vcpu_events(cpu);
    if (!write_kvmstate_to_list(cpu)) {
        return -EINVAL;
    write_list_to_cpustate(cpu);
    ret = kvm_arm_sync_mpstate_to_qemu(cpu);
        kvm_physical_memory_addr_from_host(c->kvm_state, addr, &paddr)) {

    if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 4, 0) ||
        cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&brk_insn, 4, 1)) {
        return -EINVAL;

    if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&brk, 4, 0) ||
        cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 4, 1)) {
        return -EINVAL;

    ARMCPU *cpu = ARM_CPU(cpuobj);
     * MTE on KVM is enabled on a per-VM basis (and retrying doesn't make
        error_setg_errno(errp, -ret, "Failed to enable KVM_CAP_ARM_MTE");
    cpu->kvm_mte = true;