Lines Matching +full:cs +full:- +full:3 (QEMU, target/arm/kvm.c)

4  * Copyright Christoffer Dall 2009-2010
5 * Copyright Mian-M. Hamayun 2013, Virtual Open Systems
9 * See the COPYING file in the top-level directory.
19 #include "qemu/error-report.h"
20 #include "qemu/main-loop.h"
33 #include "exec/address-spaces.h"
80 init.target = cpu->kvm_target; in kvm_arm_vcpu_init()
81 memcpy(init.features, cpu->kvm_init_features, sizeof(init.features)); in kvm_arm_vcpu_init()
107 int ret = 0, kvmfd = -1, vmfd = -1, cpufd = -1; in kvm_arm_create_scratch_host_vcpu()
120 } while (vmfd == -1 && errno == EINTR); in kvm_arm_create_scratch_host_vcpu()
149 if (init->target == -1) { in kvm_arm_create_scratch_host_vcpu()
154 init->target = preferred.target; in kvm_arm_create_scratch_host_vcpu()
172 memcpy(try.features, init->features, sizeof(init->features)); in kvm_arm_create_scratch_host_vcpu()
181 init->target = try.target; in kvm_arm_create_scratch_host_vcpu()
215 for (i = 2; i >= 0; i--) { in kvm_arm_destroy_scratch_host_vcpu()
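
The fragments above implement QEMU's scratch-vCPU probe: host CPU features are queried before any real vCPU exists by opening /dev/kvm, creating a throwaway VM and vCPU, and initialising it either with the caller's requested target or, when target == -1, with whatever KVM_ARM_PREFERRED_TARGET reports. A minimal standalone sketch of the same pattern, with fdarray[] mirroring the kvmfd/vmfd/cpufd triple and teardown mirroring the reverse-order close loop in kvm_arm_destroy_scratch_host_vcpu():

#include <errno.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Illustrative probe, not QEMU's code: open a scratch VM + vCPU so host
 * ID registers can be read before any real vCPU exists.  Returns 0 and
 * fills fdarray[] = { kvmfd, vmfd, cpufd } on success, -1 on failure. */
static int scratch_vcpu_open(struct kvm_vcpu_init *init, int fdarray[3])
{
    int kvmfd = open("/dev/kvm", O_RDWR | O_CLOEXEC);
    int vmfd = -1, cpufd = -1;

    if (kvmfd < 0) {
        return -1;
    }
    do {
        /* KVM_CREATE_VM may fail with EINTR; retry as the fragment above does */
        vmfd = ioctl(kvmfd, KVM_CREATE_VM, 0);
    } while (vmfd == -1 && errno == EINTR);
    if (vmfd < 0) {
        goto err;
    }
    cpufd = ioctl(vmfd, KVM_CREATE_VCPU, 0);
    if (cpufd < 0) {
        goto err;
    }
    if (init->target == -1) {
        /* No target requested: ask the kernel what it prefers on this host */
        struct kvm_vcpu_init preferred;
        if (ioctl(vmfd, KVM_ARM_PREFERRED_TARGET, &preferred) < 0) {
            goto err;
        }
        init->target = preferred.target;
    }
    if (ioctl(cpufd, KVM_ARM_VCPU_INIT, init) < 0) {
        goto err;
    }
    fdarray[0] = kvmfd;
    fdarray[1] = vmfd;
    fdarray[2] = cpufd;
    return 0;
err:
    /* teardown closes the fds in reverse creation order */
    if (cpufd >= 0) close(cpufd);
    if (vmfd >= 0) close(vmfd);
    close(kvmfd);
    return -1;
}
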
229 return -1; in read_sys_reg32()
256 int fdarray[3]; in kvm_arm_get_host_cpu_features()
274 * target = -1 informs kvm_arm_create_scratch_host_vcpu() in kvm_arm_get_host_cpu_features()
277 struct kvm_vcpu_init init = { .target = -1, }; in kvm_arm_get_host_cpu_features()
307 ahcf->target = init.target; in kvm_arm_get_host_cpu_features()
308 ahcf->dtb_compatible = "arm,arm-v8"; in kvm_arm_get_host_cpu_features()
310 err = read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64pfr0, in kvm_arm_get_host_cpu_features()
311 ARM64_SYS_REG(3, 0, 0, 4, 0)); in kvm_arm_get_host_cpu_features()
324 * run the tiniest of hand-crafted kernels to extract in kvm_arm_get_host_cpu_features()
329 ahcf->isar.id_aa64pfr0 = 0x00000011; /* EL1&0, AArch64 only */ in kvm_arm_get_host_cpu_features()
332 err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64pfr1, in kvm_arm_get_host_cpu_features()
333 ARM64_SYS_REG(3, 0, 0, 4, 1)); in kvm_arm_get_host_cpu_features()
334 err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64smfr0, in kvm_arm_get_host_cpu_features()
335 ARM64_SYS_REG(3, 0, 0, 4, 5)); in kvm_arm_get_host_cpu_features()
336 err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64dfr0, in kvm_arm_get_host_cpu_features()
337 ARM64_SYS_REG(3, 0, 0, 5, 0)); in kvm_arm_get_host_cpu_features()
338 err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64dfr1, in kvm_arm_get_host_cpu_features()
339 ARM64_SYS_REG(3, 0, 0, 5, 1)); in kvm_arm_get_host_cpu_features()
340 err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64isar0, in kvm_arm_get_host_cpu_features()
341 ARM64_SYS_REG(3, 0, 0, 6, 0)); in kvm_arm_get_host_cpu_features()
342 err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64isar1, in kvm_arm_get_host_cpu_features()
343 ARM64_SYS_REG(3, 0, 0, 6, 1)); in kvm_arm_get_host_cpu_features()
344 err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64isar2, in kvm_arm_get_host_cpu_features()
345 ARM64_SYS_REG(3, 0, 0, 6, 2)); in kvm_arm_get_host_cpu_features()
346 err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64mmfr0, in kvm_arm_get_host_cpu_features()
347 ARM64_SYS_REG(3, 0, 0, 7, 0)); in kvm_arm_get_host_cpu_features()
348 err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64mmfr1, in kvm_arm_get_host_cpu_features()
349 ARM64_SYS_REG(3, 0, 0, 7, 1)); in kvm_arm_get_host_cpu_features()
350 err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64mmfr2, in kvm_arm_get_host_cpu_features()
351 ARM64_SYS_REG(3, 0, 0, 7, 2)); in kvm_arm_get_host_cpu_features()
352 err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64mmfr3, in kvm_arm_get_host_cpu_features()
353 ARM64_SYS_REG(3, 0, 0, 7, 3)); in kvm_arm_get_host_cpu_features()
362 err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_pfr0, in kvm_arm_get_host_cpu_features()
363 ARM64_SYS_REG(3, 0, 0, 1, 0)); in kvm_arm_get_host_cpu_features()
364 err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_pfr1, in kvm_arm_get_host_cpu_features()
365 ARM64_SYS_REG(3, 0, 0, 1, 1)); in kvm_arm_get_host_cpu_features()
366 err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_dfr0, in kvm_arm_get_host_cpu_features()
367 ARM64_SYS_REG(3, 0, 0, 1, 2)); in kvm_arm_get_host_cpu_features()
368 err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr0, in kvm_arm_get_host_cpu_features()
369 ARM64_SYS_REG(3, 0, 0, 1, 4)); in kvm_arm_get_host_cpu_features()
370 err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr1, in kvm_arm_get_host_cpu_features()
371 ARM64_SYS_REG(3, 0, 0, 1, 5)); in kvm_arm_get_host_cpu_features()
372 err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr2, in kvm_arm_get_host_cpu_features()
373 ARM64_SYS_REG(3, 0, 0, 1, 6)); in kvm_arm_get_host_cpu_features()
374 err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr3, in kvm_arm_get_host_cpu_features()
375 ARM64_SYS_REG(3, 0, 0, 1, 7)); in kvm_arm_get_host_cpu_features()
376 err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar0, in kvm_arm_get_host_cpu_features()
377 ARM64_SYS_REG(3, 0, 0, 2, 0)); in kvm_arm_get_host_cpu_features()
378 err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar1, in kvm_arm_get_host_cpu_features()
379 ARM64_SYS_REG(3, 0, 0, 2, 1)); in kvm_arm_get_host_cpu_features()
380 err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar2, in kvm_arm_get_host_cpu_features()
381 ARM64_SYS_REG(3, 0, 0, 2, 2)); in kvm_arm_get_host_cpu_features()
382 err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar3, in kvm_arm_get_host_cpu_features()
383 ARM64_SYS_REG(3, 0, 0, 2, 3)); in kvm_arm_get_host_cpu_features()
384 err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar4, in kvm_arm_get_host_cpu_features()
385 ARM64_SYS_REG(3, 0, 0, 2, 4)); in kvm_arm_get_host_cpu_features()
386 err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar5, in kvm_arm_get_host_cpu_features()
387 ARM64_SYS_REG(3, 0, 0, 2, 5)); in kvm_arm_get_host_cpu_features()
388 err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr4, in kvm_arm_get_host_cpu_features()
389 ARM64_SYS_REG(3, 0, 0, 2, 6)); in kvm_arm_get_host_cpu_features()
390 err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar6, in kvm_arm_get_host_cpu_features()
391 ARM64_SYS_REG(3, 0, 0, 2, 7)); in kvm_arm_get_host_cpu_features()
393 err |= read_sys_reg32(fdarray[2], &ahcf->isar.mvfr0, in kvm_arm_get_host_cpu_features()
394 ARM64_SYS_REG(3, 0, 0, 3, 0)); in kvm_arm_get_host_cpu_features()
395 err |= read_sys_reg32(fdarray[2], &ahcf->isar.mvfr1, in kvm_arm_get_host_cpu_features()
396 ARM64_SYS_REG(3, 0, 0, 3, 1)); in kvm_arm_get_host_cpu_features()
397 err |= read_sys_reg32(fdarray[2], &ahcf->isar.mvfr2, in kvm_arm_get_host_cpu_features()
398 ARM64_SYS_REG(3, 0, 0, 3, 2)); in kvm_arm_get_host_cpu_features()
399 err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_pfr2, in kvm_arm_get_host_cpu_features()
400 ARM64_SYS_REG(3, 0, 0, 3, 4)); in kvm_arm_get_host_cpu_features()
401 err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_dfr1, in kvm_arm_get_host_cpu_features()
402 ARM64_SYS_REG(3, 0, 0, 3, 5)); in kvm_arm_get_host_cpu_features()
403 err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr5, in kvm_arm_get_host_cpu_features()
404 ARM64_SYS_REG(3, 0, 0, 3, 6)); in kvm_arm_get_host_cpu_features()
408 * provide an accessor for it in 64-bit mode, which is what this in kvm_arm_get_host_cpu_features()
409 * scratch VM is in, and there's no architected "64-bit sysreg in kvm_arm_get_host_cpu_features()
410 * which reads the same as the 32-bit register" the way there is in kvm_arm_get_host_cpu_features()
416 if (FIELD_EX32(ahcf->isar.id_aa64pfr0, ID_AA64PFR0, EL1) >= 2) { in kvm_arm_get_host_cpu_features()
417 int wrps = FIELD_EX64(ahcf->isar.id_aa64dfr0, ID_AA64DFR0, WRPS); in kvm_arm_get_host_cpu_features()
418 int brps = FIELD_EX64(ahcf->isar.id_aa64dfr0, ID_AA64DFR0, BRPS); in kvm_arm_get_host_cpu_features()
420 FIELD_EX64(ahcf->isar.id_aa64dfr0, ID_AA64DFR0, CTX_CMPS); in kvm_arm_get_host_cpu_features()
423 !!FIELD_EX32(ahcf->isar.id_aa64pfr0, ID_AA64PFR0, EL3); in kvm_arm_get_host_cpu_features()
433 ahcf->isar.dbgdidr = dbgdidr; in kvm_arm_get_host_cpu_features()
438 err |= read_sys_reg64(fdarray[2], &ahcf->isar.reset_pmcr_el0, in kvm_arm_get_host_cpu_features()
439 ARM64_SYS_REG(3, 3, 9, 12, 0)); in kvm_arm_get_host_cpu_features()
450 err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64zfr0, in kvm_arm_get_host_cpu_features()
451 ARM64_SYS_REG(3, 0, 0, 4, 4)); in kvm_arm_get_host_cpu_features()
471 ahcf->features = features; in kvm_arm_get_host_cpu_features()
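
Every read_sys_reg64()/read_sys_reg32() call above is one KVM_GET_ONE_REG ioctl against the scratch vCPU; ARM64_SYS_REG(op0, op1, CRn, CRm, op2) packs the architected system-register encoding into a KVM register ID, so (3, 0, 0, 4, 0) names ID_AA64PFR0_EL1, (3, 0, 0, 6, 0) names ID_AA64ISAR0_EL1, and so on. A sketch of the read helper plus one probe, assuming an arm64 host where <linux/kvm.h> pulls in ARM64_SYS_REG() from the asm UAPI header:

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>   /* on arm64 this pulls in ARM64_SYS_REG() */

/* Fetch one 64-bit host system register via KVM_GET_ONE_REG, as the
 * read_sys_reg64() fragments above do. */
static int read_sysreg_u64(int cpufd, uint64_t *pret, uint64_t id)
{
    struct kvm_one_reg idreg = { .id = id, .addr = (uintptr_t)pret };

    return ioctl(cpufd, KVM_GET_ONE_REG, &idreg);
}

static int probe_pfr0(int cpufd, uint64_t *pfr0)
{
    /* ID_AA64PFR0_EL1 is encoded as op0=3, op1=0, CRn=0, CRm=4, op2=0 */
    return read_sysreg_u64(cpufd, pfr0, ARM64_SYS_REG(3, 0, 0, 4, 0));
}
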
478 CPUARMState *env = &cpu->env; in kvm_arm_set_cpu_features_from_host()
486 cpu->kvm_target = QEMU_KVM_ARM_TARGET_NONE; in kvm_arm_set_cpu_features_from_host()
487 cpu->host_cpu_probe_failed = true; in kvm_arm_set_cpu_features_from_host()
492 cpu->kvm_target = arm_host_cpu_features.target; in kvm_arm_set_cpu_features_from_host()
493 cpu->dtb_compatible = arm_host_cpu_features.dtb_compatible; in kvm_arm_set_cpu_features_from_host()
494 cpu->isar = arm_host_cpu_features.isar; in kvm_arm_set_cpu_features_from_host()
495 env->features = arm_host_cpu_features.features; in kvm_arm_set_cpu_features_from_host()
500 return !ARM_CPU(obj)->kvm_adjvtime; in kvm_no_adjvtime_get()
505 ARM_CPU(obj)->kvm_adjvtime = !value; in kvm_no_adjvtime_set()
510 return ARM_CPU(obj)->kvm_steal_time != ON_OFF_AUTO_OFF; in kvm_steal_time_get()
515 ARM_CPU(obj)->kvm_steal_time = value ? ON_OFF_AUTO_ON : ON_OFF_AUTO_OFF; in kvm_steal_time_set()
518 /* KVM VCPU properties should be prefixed with "kvm-". */
521 CPUARMState *env = &cpu->env; in kvm_arm_add_vcpu_properties()
525 cpu->kvm_adjvtime = true; in kvm_arm_add_vcpu_properties()
526 object_property_add_bool(obj, "kvm-no-adjvtime", kvm_no_adjvtime_get, in kvm_arm_add_vcpu_properties()
528 object_property_set_description(obj, "kvm-no-adjvtime", in kvm_arm_add_vcpu_properties()
534 cpu->kvm_steal_time = ON_OFF_AUTO_AUTO; in kvm_arm_add_vcpu_properties()
535 object_property_add_bool(obj, "kvm-steal-time", kvm_steal_time_get, in kvm_arm_add_vcpu_properties()
537 object_property_set_description(obj, "kvm-steal-time", in kvm_arm_add_vcpu_properties()
548 KVMState *s = KVM_STATE(ms->accelerator); in kvm_arm_get_max_vm_ipa_size()
568 * whether we are using an in-kernel VGIC or not. in kvm_arch_init()
584 if (ms->smp.cpus > 256 && in kvm_arch_init()
588 ret = -EINVAL; in kvm_arch_init()
601 if (s->kvm_eager_split_size) { in kvm_arch_init()
606 s->kvm_eager_split_size = 0; in kvm_arch_init()
608 } else if (!(s->kvm_eager_split_size & sizes)) { in kvm_arch_init()
610 ret = -EINVAL; in kvm_arch_init()
613 s->kvm_eager_split_size); in kvm_arch_init()
616 strerror(-ret)); in kvm_arch_init()
634 return cpu->cpu_index; in kvm_arch_vcpu_id()
666 if (section->mr == kd->mr) { in kvm_arm_devlistener_add()
667 kd->kda.addr = section->offset_within_address_space; in kvm_arm_devlistener_add()
678 if (section->mr == kd->mr) { in kvm_arm_devlistener_del()
679 kd->kda.addr = -1; in kvm_arm_devlistener_del()
685 .name = "kvm-arm",
693 struct kvm_device_attr *attr = &kd->kdattr; in kvm_arm_set_device_addr()
695 uint64_t addr = kd->kda.addr; in kvm_arm_set_device_addr()
697 addr |= kd->kda_addr_ormask; in kvm_arm_set_device_addr()
698 attr->addr = (uintptr_t)&addr; in kvm_arm_set_device_addr()
699 ret = kvm_device_ioctl(kd->dev_fd, KVM_SET_DEVICE_ATTR, attr); in kvm_arm_set_device_addr()
703 strerror(-ret)); in kvm_arm_set_device_addr()
713 if (kd->kda.addr != -1) { in kvm_arm_machine_init_done()
716 memory_region_unref(kd->mr); in kvm_arm_machine_init_done()
741 kd->mr = mr; in kvm_arm_register_device()
742 kd->kda.id = devid; in kvm_arm_register_device()
743 kd->kda.addr = -1; in kvm_arm_register_device()
744 kd->kdattr.flags = 0; in kvm_arm_register_device()
745 kd->kdattr.group = group; in kvm_arm_register_device()
746 kd->kdattr.attr = attr; in kvm_arm_register_device()
747 kd->dev_fd = dev_fd; in kvm_arm_register_device()
748 kd->kda_addr_ormask = addr_ormask; in kvm_arm_register_device()
750 memory_region_ref(kd->mr); in kvm_arm_register_device()
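
kvm_arm_register_device() only records the target MemoryRegion and a placeholder address of -1; the memory listener fills in kda.addr once the region lands in the address space, and kvm_arm_machine_init_done() finally pushes each address to the kernel through kvm_arm_set_device_addr(). A reduced sketch of that last step, using the standard kvm_device_attr envelope (the group/attr values are whatever the in-kernel device, e.g. the VGIC, defines):

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Illustrative: tell an in-kernel device where its MMIO region ended up
 * in guest physical address space. */
static int set_device_addr(int dev_fd, uint32_t group, uint64_t attr,
                           uint64_t addr)
{
    /* KVM reads the address indirectly through attr.addr */
    struct kvm_device_attr da = {
        .group = group,
        .attr  = attr,
        .addr  = (uintptr_t)&addr,
    };

    return ioctl(dev_fd, KVM_SET_DEVICE_ATTR, &da);
}
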
759 return -1; in compare_u64()
773 res = bsearch(&regidx, cpu->cpreg_indexes, cpu->cpreg_array_len, in kvm_arm_get_cpreg_ptr()
777 return &cpu->cpreg_values[res - cpu->cpreg_indexes]; in kvm_arm_get_cpreg_ptr()
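
cpreg_indexes[] is kept sorted with compare_u64 (see the qsort in kvm_arm_init_cpreg_list() below), which lets kvm_arm_get_cpreg_ptr() map a 64-bit register index to its slot with a plain bsearch; the pointer difference res - cpu->cpreg_indexes recovers the position in the parallel cpreg_values[] array. A self-contained sketch of the lookup:

#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>

static int compare_u64(const void *a, const void *b)
{
    uint64_t x = *(const uint64_t *)a, y = *(const uint64_t *)b;

    return (x > y) - (x < y);   /* avoids overflow of a plain subtraction */
}

/* Illustrative lookup: indexes[] is sorted; values[] is parallel to it. */
static uint64_t *get_value_ptr(uint64_t regidx, uint64_t *indexes,
                               uint64_t *values, size_t len)
{
    uint64_t *res = bsearch(&regidx, indexes, len, sizeof(*indexes),
                            compare_u64);

    return res ? &values[res - indexes] : NULL;
}
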
805 * the previous TCG-created cpreg list).
814 CPUState *cs = CPU(cpu); in kvm_arm_init_cpreg_list() local
817 ret = kvm_vcpu_ioctl(cs, KVM_GET_REG_LIST, &rl); in kvm_arm_init_cpreg_list()
818 if (ret != -E2BIG) { in kvm_arm_init_cpreg_list()
822 rlp->n = rl.n; in kvm_arm_init_cpreg_list()
823 ret = kvm_vcpu_ioctl(cs, KVM_GET_REG_LIST, rlp); in kvm_arm_init_cpreg_list()
830 qsort(&rlp->reg, rlp->n, sizeof(rlp->reg[0]), compare_u64); in kvm_arm_init_cpreg_list()
832 for (i = 0, arraylen = 0; i < rlp->n; i++) { in kvm_arm_init_cpreg_list()
833 if (!kvm_arm_reg_syncs_via_cpreg_list(rlp->reg[i])) { in kvm_arm_init_cpreg_list()
836 switch (rlp->reg[i] & KVM_REG_SIZE_MASK) { in kvm_arm_init_cpreg_list()
842 ret = -EINVAL; in kvm_arm_init_cpreg_list()
849 cpu->cpreg_indexes = g_renew(uint64_t, cpu->cpreg_indexes, arraylen); in kvm_arm_init_cpreg_list()
850 cpu->cpreg_values = g_renew(uint64_t, cpu->cpreg_values, arraylen); in kvm_arm_init_cpreg_list()
851 cpu->cpreg_vmstate_indexes = g_renew(uint64_t, cpu->cpreg_vmstate_indexes, in kvm_arm_init_cpreg_list()
853 cpu->cpreg_vmstate_values = g_renew(uint64_t, cpu->cpreg_vmstate_values, in kvm_arm_init_cpreg_list()
855 cpu->cpreg_array_len = arraylen; in kvm_arm_init_cpreg_list()
856 cpu->cpreg_vmstate_array_len = arraylen; in kvm_arm_init_cpreg_list()
858 for (i = 0, arraylen = 0; i < rlp->n; i++) { in kvm_arm_init_cpreg_list()
859 uint64_t regidx = rlp->reg[i]; in kvm_arm_init_cpreg_list()
863 cpu->cpreg_indexes[arraylen] = regidx; in kvm_arm_init_cpreg_list()
866 assert(cpu->cpreg_array_len == arraylen); in kvm_arm_init_cpreg_list()
873 ret = -EINVAL; in kvm_arm_init_cpreg_list()
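
The ret != -E2BIG check above is the standard two-call protocol for KVM_GET_REG_LIST: the first call passes n = 0, so the kernel fails with E2BIG but reports how many indexes it has; the caller then allocates and repeats. After that, the function makes two passes over the sorted list, first counting the registers that sync via the cpreg list (to size the arrays), then filling them in. A compact sketch of the fetch using raw ioctls (QEMU's kvm_vcpu_ioctl() wraps the same call and returns -errno directly):

#include <errno.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Illustrative: fetch the vCPU's register index list.  Caller frees. */
static struct kvm_reg_list *get_reg_list(int cpufd)
{
    struct kvm_reg_list probe = { .n = 0 }, *rlp;

    /* first call is expected to fail with E2BIG, filling in probe.n */
    if (ioctl(cpufd, KVM_GET_REG_LIST, &probe) == 0 || errno != E2BIG) {
        return NULL;
    }
    rlp = malloc(sizeof(*rlp) + probe.n * sizeof(rlp->reg[0]));
    if (!rlp) {
        return NULL;
    }
    rlp->n = probe.n;
    if (ioctl(cpufd, KVM_GET_REG_LIST, rlp) < 0) {
        free(rlp);
        return NULL;
    }
    return rlp;
}
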
906 CPUState *cs = CPU(cpu); in write_kvmstate_to_list() local
910 for (i = 0; i < cpu->cpreg_array_len; i++) { in write_kvmstate_to_list()
911 uint64_t regidx = cpu->cpreg_indexes[i]; in write_kvmstate_to_list()
917 ret = kvm_get_one_reg(cs, regidx, &v32); in write_kvmstate_to_list()
919 cpu->cpreg_values[i] = v32; in write_kvmstate_to_list()
923 ret = kvm_get_one_reg(cs, regidx, cpu->cpreg_values + i); in write_kvmstate_to_list()
937 CPUState *cs = CPU(cpu); in write_list_to_kvmstate() local
941 for (i = 0; i < cpu->cpreg_array_len; i++) { in write_list_to_kvmstate()
942 uint64_t regidx = cpu->cpreg_indexes[i]; in write_list_to_kvmstate()
952 v32 = cpu->cpreg_values[i]; in write_list_to_kvmstate()
953 ret = kvm_set_one_reg(cs, regidx, &v32); in write_list_to_kvmstate()
956 ret = kvm_set_one_reg(cs, regidx, cpu->cpreg_values + i); in write_list_to_kvmstate()
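
Both sync directions dispatch on KVM_REG_SIZE_MASK: KVM_REG_SIZE_U32 registers are staged through a local uint32_t, while QEMU's cpreg_values[] always holds 64-bit slots. A sketch of the write-back direction with a raw KVM_SET_ONE_REG (kvm_set_one_reg() in the fragments is QEMU's thin wrapper around it):

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Illustrative: push one staged value back to KVM, honouring the size
 * encoded in the register index itself. */
static int put_one_cpreg(int cpufd, uint64_t regidx, uint64_t value)
{
    struct kvm_one_reg reg = { .id = regidx };
    uint32_t v32;

    switch (regidx & KVM_REG_SIZE_MASK) {
    case KVM_REG_SIZE_U32:
        v32 = value;                 /* truncate to the register's width */
        reg.addr = (uintptr_t)&v32;
        break;
    case KVM_REG_SIZE_U64:
        reg.addr = (uintptr_t)&value;
        break;
    default:
        return -1;                   /* other sizes never reach this list */
    }
    return ioctl(cpufd, KVM_SET_ONE_REG, &reg);
}
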
975 if (cpu->kvm_vtime_dirty) { in kvm_arm_cpu_pre_save()
976 *kvm_arm_get_cpreg_ptr(cpu, KVM_REG_ARM_TIMER_CNT) = cpu->kvm_vtime; in kvm_arm_cpu_pre_save()
983 if (cpu->kvm_adjvtime) { in kvm_arm_cpu_post_load()
984 cpu->kvm_vtime = *kvm_arm_get_cpreg_ptr(cpu, KVM_REG_ARM_TIMER_CNT); in kvm_arm_cpu_post_load()
985 cpu->kvm_vtime_dirty = true; in kvm_arm_cpu_post_load()
993 /* Re-init VCPU so that all registers are set to in kvm_arm_reset_vcpu()
998 fprintf(stderr, "kvm_arm_vcpu_init failed: %s\n", strerror(-ret)); in kvm_arm_reset_vcpu()
1022 .mp_state = (cpu->power_state == PSCI_OFF) ? in kvm_arm_sync_mpstate_to_kvm()
1041 cpu->power_state = (mp_state.mp_state == KVM_MP_STATE_STOPPED) ? in kvm_arm_sync_mpstate_to_qemu()
1057 if (cpu->kvm_vtime_dirty) { in kvm_arm_get_virtual_time()
1061 ret = kvm_get_one_reg(CPU(cpu), KVM_REG_ARM_TIMER_CNT, &cpu->kvm_vtime); in kvm_arm_get_virtual_time()
1067 cpu->kvm_vtime_dirty = true; in kvm_arm_get_virtual_time()
1080 if (!cpu->kvm_vtime_dirty) { in kvm_arm_put_virtual_time()
1084 ret = kvm_set_one_reg(CPU(cpu), KVM_REG_ARM_TIMER_CNT, &cpu->kvm_vtime); in kvm_arm_put_virtual_time()
1090 cpu->kvm_vtime_dirty = false; in kvm_arm_put_virtual_time()
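
The kvm-adjvtime machinery keeps the guest's virtual counter from running across pauses: when the VM stops, kvm_arm_get_virtual_time() latches KVM_REG_ARM_TIMER_CNT into cpu->kvm_vtime and marks it dirty; when the VM resumes, kvm_arm_put_virtual_time() writes the latched value back, so CNTVCT does not jump over the stopped interval. An illustrative shadow of that pair (KVM_REG_ARM_TIMER_CNT is the real UAPI index for the virtual counter register):

#include <stdbool.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

struct vtime { uint64_t cnt; bool dirty; };

/* Latch the counter when the VM stops... */
static void vtime_on_stop(int cpufd, struct vtime *vt)
{
    struct kvm_one_reg reg = {
        .id = KVM_REG_ARM_TIMER_CNT, .addr = (uintptr_t)&vt->cnt,
    };

    if (!vt->dirty && ioctl(cpufd, KVM_GET_ONE_REG, &reg) == 0) {
        vt->dirty = true;
    }
}

/* ...and restore it when the VM resumes. */
static void vtime_on_resume(int cpufd, struct vtime *vt)
{
    struct kvm_one_reg reg = {
        .id = KVM_REG_ARM_TIMER_CNT, .addr = (uintptr_t)&vt->cnt,
    };

    if (vt->dirty && ioctl(cpufd, KVM_SET_ONE_REG, &reg) == 0) {
        vt->dirty = false;
    }
}
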
1103 CPUARMState *env = &cpu->env; in kvm_put_vcpu_events()
1112 events.exception.serror_pending = env->serror.pending; in kvm_put_vcpu_events()
1118 events.exception.serror_has_esr = env->serror.has_esr; in kvm_put_vcpu_events()
1119 events.exception.serror_esr = env->serror.esr; in kvm_put_vcpu_events()
1140 CPUARMState *env = &cpu->env; in kvm_get_vcpu_events()
1155 env->serror.pending = events.exception.serror_pending; in kvm_get_vcpu_events()
1156 env->serror.has_esr = events.exception.serror_has_esr; in kvm_get_vcpu_events()
1157 env->serror.esr = events.exception.serror_esr; in kvm_get_vcpu_events()
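
Pending SError state is migrated through KVM_GET_VCPU_EVENTS/KVM_SET_VCPU_EVENTS; the fragments copy serror_pending, serror_has_esr and serror_esr between env->serror and the UAPI struct. A sketch of the injection direction (on arm64, struct kvm_vcpu_events exposes exactly these exception fields; setting serror_has_esr additionally requires KVM_CAP_ARM_INJECT_SERROR_ESR):

#include <stdbool.h>
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Illustrative: re-inject a pending SError (with optional ESR) into a
 * vCPU, mirroring the kvm_put_vcpu_events() fragments above. */
static int put_serror(int cpufd, bool pending, bool has_esr, uint64_t esr)
{
    struct kvm_vcpu_events events;

    memset(&events, 0, sizeof(events));
    events.exception.serror_pending = pending;
    events.exception.serror_has_esr = has_esr;
    events.exception.serror_esr = esr;
    return ioctl(cpufd, KVM_SET_VCPU_EVENTS, &events);
}
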
1162 #define ARM64_REG_ESR_EL1 ARM64_SYS_REG(3, 0, 5, 2, 0)
1163 #define ARM64_REG_TCR_EL1 ARM64_SYS_REG(3, 0, 2, 0, 2)
1171 * FS[4] - DFSR[10]
1172 * FS[3:0] - DFSR[3:0]
1193 CPUState *cs = CPU(cpu); in kvm_arm_verify_ext_dabt_pending() local
1196 if (!kvm_get_one_reg(cs, ARM64_REG_ESR_EL1, &dfsr_val)) { in kvm_arm_verify_ext_dabt_pending()
1197 CPUARMState *env = &cpu->env; in kvm_arm_verify_ext_dabt_pending()
1204 if (!kvm_get_one_reg(cs, ARM64_REG_TCR_EL1, &ttbcr)) { in kvm_arm_verify_ext_dabt_pending()
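
The comment above records where the fault-status bits live in the two DFSR layouts, which is why kvm_arm_verify_ext_dabt_pending() reads TCR_EL1 first: with the short-descriptor format (TTBCR.EAE == 0) the FS field is split as FS[4] = DFSR[10] and FS[3:0] = DFSR[3:0], while the LPAE format keeps a contiguous status field in DFSR[5:0]. A small helper showing just the extraction (bit positions per the ARM ARM; the specific external-abort codes the function compares against are not reproduced here):

#include <stdbool.h>
#include <stdint.h>

/* Illustrative: pull the fault-status code out of a 32-bit DFSR value,
 * whose layout depends on whether LPAE (TTBCR.EAE == 1) is in use. */
static uint32_t dfsr_fault_status(uint32_t dfsr, bool lpae)
{
    if (lpae) {
        return dfsr & 0x3f;                       /* STATUS, DFSR[5:0] */
    }
    /* short format: FS[4] = DFSR[10], FS[3:0] = DFSR[3:0] */
    return (((dfsr >> 10) & 1) << 4) | (dfsr & 0xf);
}
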
1219 void kvm_arch_pre_run(CPUState *cs, struct kvm_run *run) in kvm_arch_pre_run() argument
1221 ARMCPU *cpu = ARM_CPU(cs); in kvm_arch_pre_run()
1222 CPUARMState *env = &cpu->env; in kvm_arch_pre_run()
1224 if (unlikely(env->ext_dabt_raised)) { in kvm_arch_pre_run()
1227 * otherwise risking indefinitely re-running the faulting instruction in kvm_arch_pre_run()
1230 * an IMPLEMENTATION DEFINED exception (for 32-bit EL1) in kvm_arch_pre_run()
1242 env->ext_dabt_raised = 0; in kvm_arch_pre_run()
1246 MemTxAttrs kvm_arch_post_run(CPUState *cs, struct kvm_run *run) in kvm_arch_post_run() argument
1253 * We only need to sync timer states with user-space interrupt in kvm_arch_post_run()
1259 cpu = ARM_CPU(cs); in kvm_arch_post_run()
1261 /* Synchronize our shadowed in-kernel device irq lines with the kvm ones */ in kvm_arch_post_run()
1262 if (run->s.regs.device_irq_level != cpu->device_irq_level) { in kvm_arch_post_run()
1263 switched_level = cpu->device_irq_level ^ run->s.regs.device_irq_level; in kvm_arch_post_run()
1268 qemu_set_irq(cpu->gt_timer_outputs[GTIMER_VIRT], in kvm_arch_post_run()
1269 !!(run->s.regs.device_irq_level & in kvm_arch_post_run()
1275 qemu_set_irq(cpu->gt_timer_outputs[GTIMER_PHYS], in kvm_arch_post_run()
1276 !!(run->s.regs.device_irq_level & in kvm_arch_post_run()
1282 qemu_set_irq(cpu->pmu_interrupt, in kvm_arch_post_run()
1283 !!(run->s.regs.device_irq_level & KVM_ARM_DEV_PMU)); in kvm_arch_post_run()
1288 qemu_log_mask(LOG_UNIMP, "%s: unhandled in-kernel device IRQ %x\n", in kvm_arch_post_run()
1293 cpu->device_irq_level = run->s.regs.device_irq_level; in kvm_arch_post_run()
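
After each KVM_RUN, run->s.regs.device_irq_level reports the current level of the in-kernel timer and PMU output lines; XORing it against the cached cpu->device_irq_level isolates exactly the lines that changed, so only those qemu_irq outputs are toggled before the cache is updated. The pattern in isolation (set_line_fn is a hypothetical stand-in for qemu_set_irq()):

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical callback type standing in for qemu_set_irq() */
typedef void (*set_line_fn)(int line, bool level);

/* Illustrative: propagate only the IRQ lines whose level changed, and
 * return the new cached level word. */
static uint64_t sync_irq_levels(uint64_t cached, uint64_t fresh,
                                set_line_fn set_line)
{
    uint64_t switched = cached ^ fresh;   /* bits that differ = edges */
    int line = 0;

    while (switched) {
        if (switched & 1) {
            set_line(line, fresh & (1ull << line));
        }
        switched >>= 1;
        line++;
    }
    return fresh;
}
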
1305 if (cpu->kvm_adjvtime) { in kvm_arm_vm_state_change()
1309 if (cpu->kvm_adjvtime) { in kvm_arm_vm_state_change()
1319 * ISV bit set to '0b0' -> no valid instruction syndrome
1327 CPUARMState *env = &cpu->env; in kvm_arm_handle_dabt_nisv()
1343 env->ext_dabt_raised = 1; in kvm_arm_handle_dabt_nisv()
1352 return -1; in kvm_arm_handle_dabt_nisv()
1364 * To minimise translating between kernel and user-space the kernel
1365 * ABI just provides user-space with the full exception syndrome
1371 int hsr_ec = syn_get_ec(debug_exit->hsr); in kvm_arm_handle_debug()
1372 CPUState *cs = CPU(cpu); in kvm_arm_handle_debug() local
1373 CPUARMState *env = &cpu->env; in kvm_arm_handle_debug()
1376 kvm_cpu_synchronize_state(cs); in kvm_arm_handle_debug()
1380 if (cs->singlestep_enabled) { in kvm_arm_handle_debug()
1387 error_report("%s: guest single-step while debugging unsupported" in kvm_arm_handle_debug()
1389 __func__, env->pc, debug_exit->hsr); in kvm_arm_handle_debug()
1394 if (kvm_find_sw_breakpoint(cs, env->pc)) { in kvm_arm_handle_debug()
1399 if (find_hw_breakpoint(cs, env->pc)) { in kvm_arm_handle_debug()
1405 CPUWatchpoint *wp = find_hw_watchpoint(cs, debug_exit->far); in kvm_arm_handle_debug()
1407 cs->watchpoint_hit = wp; in kvm_arm_handle_debug()
1414 __func__, debug_exit->hsr, env->pc); in kvm_arm_handle_debug()
1418 * the guest. Let's re-use the existing TCG interrupt code to set in kvm_arm_handle_debug()
1421 cs->exception_index = EXCP_BKPT; in kvm_arm_handle_debug()
1422 env->exception.syndrome = debug_exit->hsr; in kvm_arm_handle_debug()
1423 env->exception.vaddress = debug_exit->far; in kvm_arm_handle_debug()
1424 env->exception.target_el = 1; in kvm_arm_handle_debug()
1426 arm_cpu_do_interrupt(cs); in kvm_arm_handle_debug()
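
kvm_arm_handle_debug() classifies the exit by the exception class carried in the syndrome the kernel hands back: syn_get_ec() extracts ESR/HSR bits [31:26], and the handler then tries single-step, software breakpoints, hardware breakpoints and watchpoints in turn, finally re-injecting unrecognised debug exceptions into the guest as EXCP_BKPT. The extraction itself is just:

#include <stdint.h>

/* Illustrative equivalent of syn_get_ec(): the exception class is the
 * top six bits of the ESR/HSR syndrome value. */
static inline uint32_t esr_ec(uint32_t esr)
{
    return (esr >> 26) & 0x3f;
}
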
1432 int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run) in kvm_arch_handle_exit() argument
1434 ARMCPU *cpu = ARM_CPU(cs); in kvm_arch_handle_exit()
1437 switch (run->exit_reason) { in kvm_arch_handle_exit()
1439 if (kvm_arm_handle_debug(cpu, &run->debug.arch)) { in kvm_arch_handle_exit()
1445 ret = kvm_arm_handle_dabt_nisv(cpu, run->arm_nisv.esr_iss, in kvm_arch_handle_exit()
1446 run->arm_nisv.fault_ipa); in kvm_arch_handle_exit()
1449 qemu_log_mask(LOG_UNIMP, "%s: un-handled exit reason %d\n", in kvm_arch_handle_exit()
1450 __func__, run->exit_reason); in kvm_arch_handle_exit()
1456 bool kvm_arch_stop_on_emulation_error(CPUState *cs) in kvm_arch_stop_on_emulation_error() argument
1461 int kvm_arch_process_async_events(CPUState *cs) in kvm_arch_process_async_events() argument
1491 ptr->dbg_wcr[i] = wp->wcr; in kvm_arm_copy_hw_debug_data()
1492 ptr->dbg_wvr[i] = wp->wvr; in kvm_arm_copy_hw_debug_data()
1496 ptr->dbg_bcr[i] = bp->bcr; in kvm_arm_copy_hw_debug_data()
1497 ptr->dbg_bvr[i] = bp->bvr; in kvm_arm_copy_hw_debug_data()
1501 void kvm_arch_update_guest_debug(CPUState *cs, struct kvm_guest_debug *dbg) in kvm_arch_update_guest_debug() argument
1503 if (kvm_sw_breakpoints_active(cs)) { in kvm_arch_update_guest_debug()
1504 dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP; in kvm_arch_update_guest_debug()
1506 if (kvm_arm_hw_debug_active(ARM_CPU(cs))) { in kvm_arch_update_guest_debug()
1507 dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW; in kvm_arch_update_guest_debug()
1508 kvm_arm_copy_hw_debug_data(&dbg->arch); in kvm_arch_update_guest_debug()
1519 error_report("-machine kernel_irqchip=split is not supported on ARM."); in kvm_arch_irqchip_create()
1588 route->u.msi.address_lo = doorbell_gpa; in kvm_arch_fixup_msi_route()
1589 route->u.msi.address_hi = doorbell_gpa >> 32; in kvm_arch_fixup_msi_route()
1609 return (data - 32) & 0xffff; in kvm_arch_msi_data_to_gsi()
1617 uint64_t value = s->kvm_eager_split_size; in kvm_arch_get_eager_split_size()
1629 if (s->fd != -1) { in kvm_arch_set_eager_split_size()
1630 error_setg(errp, "Unable to set early-split-size after KVM has been initialized"); in kvm_arch_set_eager_split_size()
1639 error_setg(errp, "early-split-size must be a power of two"); in kvm_arch_set_eager_split_size()
1643 s->kvm_eager_split_size = value; in kvm_arch_set_eager_split_size()
1648 object_class_property_add(oc, "eager-split-size", "size", in kvm_arch_accel_class_init()
1652 object_class_property_set_description(oc, "eager-split-size", in kvm_arch_accel_class_init()
1667 return -ENOSYS; in kvm_arch_insert_hw_breakpoint()
1681 return -ENOSYS; in kvm_arch_remove_hw_breakpoint()
1702 error_report("%s: KVM_HAS_DEVICE_ATTR: %s", name, strerror(-err)); in kvm_arm_set_device_attr()
1708 error_report("%s: KVM_SET_DEVICE_ATTR: %s", name, strerror(-err)); in kvm_arm_set_device_attr()
1722 if (!cpu->has_pmu) { in kvm_arm_pmu_init()
1739 if (!cpu->has_pmu) { in kvm_arm_pmu_set_irq()
1756 if (cpu->kvm_steal_time == ON_OFF_AUTO_OFF) { in kvm_arm_pvtime_init()
1769 if (cpu->kvm_steal_time == ON_OFF_AUTO_AUTO) { in kvm_arm_steal_time_finalize()
1770 if (!has_steal_time || !arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) { in kvm_arm_steal_time_finalize()
1771 cpu->kvm_steal_time = ON_OFF_AUTO_OFF; in kvm_arm_steal_time_finalize()
1773 cpu->kvm_steal_time = ON_OFF_AUTO_ON; in kvm_arm_steal_time_finalize()
1775 } else if (cpu->kvm_steal_time == ON_OFF_AUTO_ON) { in kvm_arm_steal_time_finalize()
1777 error_setg(errp, "'kvm-steal-time' cannot be enabled " in kvm_arm_steal_time_finalize()
1780 } else if (!arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) { in kvm_arm_steal_time_finalize()
1788 error_setg(errp, "'kvm-steal-time' cannot be enabled " in kvm_arm_steal_time_finalize()
1827 .target = -1, in kvm_arm_sve_get_vls()
1834 int fdarray[3], ret; in kvm_arm_sve_get_vls()
1850 for (i = KVM_ARM64_SVE_VLS_WORDS - 1; i >= 0; --i) { in kvm_arm_sve_get_vls()
1852 vq = 64 - clz64(vls[i]) + i * 64; in kvm_arm_sve_get_vls()
1868 uint64_t vls[KVM_ARM64_SVE_VLS_WORDS] = { cpu->sve_vq.map }; in kvm_arm_sve_set_vls()
1870 assert(cpu->sve_max_vq <= KVM_ARM64_SVE_VQ_MAX); in kvm_arm_sve_set_vls()
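
KVM reports the supported SVE vector lengths as a bitmap spread over KVM_ARM64_SVE_VLS_WORDS 64-bit words, where bit b of word i means vq = i * 64 + b + 1 is supported; the clz64 expression above picks out the largest such vq, and kvm_arm_sve_set_vls() seeds the same map from cpu->sve_vq.map. A sketch of the decode, with __builtin_clzll standing in for QEMU's clz64 and an assumed word count:

#include <stdint.h>

#define SVE_VLS_WORDS 8   /* illustrative stand-in for KVM_ARM64_SVE_VLS_WORDS */

/* Illustrative: highest supported vector quadword count in the bitmap,
 * or 0 if the map is empty. */
static int sve_max_vq(const uint64_t vls[SVE_VLS_WORDS])
{
    for (int i = SVE_VLS_WORDS - 1; i >= 0; --i) {
        if (vls[i]) {
            /* bit b of word i represents vq = i * 64 + b + 1 */
            return 64 - __builtin_clzll(vls[i]) + i * 64;
        }
    }
    return 0;
}
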
1875 #define ARM_CPU_ID_MPIDR 3, 0, 0, 0, 5
1877 int kvm_arch_init_vcpu(CPUState *cs) in kvm_arch_init_vcpu() argument
1881 ARMCPU *cpu = ARM_CPU(cs); in kvm_arch_init_vcpu()
1882 CPUARMState *env = &cpu->env; in kvm_arch_init_vcpu()
1885 if (cpu->kvm_target == QEMU_KVM_ARM_TARGET_NONE || in kvm_arch_init_vcpu()
1888 return -EINVAL; in kvm_arch_init_vcpu()
1894 memset(cpu->kvm_init_features, 0, sizeof(cpu->kvm_init_features)); in kvm_arch_init_vcpu()
1895 if (cs->start_powered_off) { in kvm_arch_init_vcpu()
1896 cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_POWER_OFF; in kvm_arch_init_vcpu()
1898 if (kvm_check_extension(cs->kvm_state, KVM_CAP_ARM_PSCI_0_2)) { in kvm_arch_init_vcpu()
1899 cpu->psci_version = QEMU_PSCI_VERSION_0_2; in kvm_arch_init_vcpu()
1900 cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_PSCI_0_2; in kvm_arch_init_vcpu()
1903 cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_EL1_32BIT; in kvm_arch_init_vcpu()
1905 if (cpu->has_pmu) { in kvm_arch_init_vcpu()
1906 cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_PMU_V3; in kvm_arch_init_vcpu()
1910 cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_SVE; in kvm_arch_init_vcpu()
1913 cpu->kvm_init_features[0] |= (1 << KVM_ARM_VCPU_PTRAUTH_ADDRESS | in kvm_arch_init_vcpu()
1938 * in the same 15-bits major 16-bits minor format that PSCI_VERSION in kvm_arch_init_vcpu()
1941 if (!kvm_get_one_reg(cs, KVM_REG_ARM_PSCI_VERSION, &psciver)) { in kvm_arch_init_vcpu()
1942 cpu->psci_version = psciver; in kvm_arch_init_vcpu()
1946 * When KVM is in use, PSCI is emulated in-kernel and not by qemu. in kvm_arch_init_vcpu()
1950 ret = kvm_get_one_reg(cs, ARM64_SYS_REG(ARM_CPU_ID_MPIDR), &mpidr); in kvm_arch_init_vcpu()
1954 cpu->mp_affinity = mpidr & ARM64_AFFINITY_MASK; in kvm_arch_init_vcpu()
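
Once the vCPU is initialised, QEMU reads MPIDR_EL1 (encoded 3, 0, 0, 0, 5, per the ARM_CPU_ID_MPIDR define above) and keeps only the affinity fields for cpu->mp_affinity. A sketch of the masking; the constant below follows the architected MPIDR layout (Aff0..Aff2 in bits [23:0], Aff3 in bits [39:32]) and is an assumption about QEMU's ARM64_AFFINITY_MASK rather than a copy of it:

#include <stdint.h>

/* Aff3 [39:32] | Aff2 [23:16] | Aff1 [15:8] | Aff0 [7:0] */
#define AFFINITY_MASK 0xff00ffffffULL   /* illustrative, per the MPIDR layout */

static inline uint64_t mpidr_to_affinity(uint64_t mpidr)
{
    return mpidr & AFFINITY_MASK;
}
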
1959 int kvm_arch_destroy_vcpu(CPUState *cs) in kvm_arch_destroy_vcpu() argument
1968 CPUARMState *env = &cpu->env; in kvm_inject_arm_sea()
1972 c->exception_index = EXCP_DATA_ABORT; in kvm_inject_arm_sea()
1973 env->exception.target_el = 1; in kvm_inject_arm_sea()
1979 same_el = arm_current_el(env) == env->exception.target_el; in kvm_inject_arm_sea()
1982 env->exception.syndrome = esr; in kvm_inject_arm_sea()
1996 static int kvm_arch_put_fpsimd(CPUState *cs) in kvm_arch_put_fpsimd() argument
1998 CPUARMState *env = &ARM_CPU(cs)->env; in kvm_arch_put_fpsimd()
2005 ret = kvm_set_one_reg(cs, AARCH64_SIMD_CORE_REG(fp_regs.vregs[i]), in kvm_arch_put_fpsimd()
2008 ret = kvm_set_one_reg(cs, AARCH64_SIMD_CORE_REG(fp_regs.vregs[i]), q); in kvm_arch_put_fpsimd()
2024 static int kvm_arch_put_sve(CPUState *cs) in kvm_arch_put_sve() argument
2026 ARMCPU *cpu = ARM_CPU(cs); in kvm_arch_put_sve()
2027 CPUARMState *env = &cpu->env; in kvm_arch_put_sve()
2033 r = sve_bswap64(tmp, &env->vfp.zregs[n].d[0], cpu->sve_max_vq * 2); in kvm_arch_put_sve()
2034 ret = kvm_set_one_reg(cs, KVM_REG_ARM64_SVE_ZREG(n, 0), r); in kvm_arch_put_sve()
2041 r = sve_bswap64(tmp, r = &env->vfp.pregs[n].p[0], in kvm_arch_put_sve()
2042 DIV_ROUND_UP(cpu->sve_max_vq * 2, 8)); in kvm_arch_put_sve()
2043 ret = kvm_set_one_reg(cs, KVM_REG_ARM64_SVE_PREG(n, 0), r); in kvm_arch_put_sve()
2049 r = sve_bswap64(tmp, &env->vfp.pregs[FFR_PRED_NUM].p[0], in kvm_arch_put_sve()
2050 DIV_ROUND_UP(cpu->sve_max_vq * 2, 8)); in kvm_arch_put_sve()
2051 ret = kvm_set_one_reg(cs, KVM_REG_ARM64_SVE_FFR(0), r); in kvm_arch_put_sve()
2059 int kvm_arch_put_registers(CPUState *cs, int level, Error **errp) in kvm_arch_put_registers() argument
2066 ARMCPU *cpu = ARM_CPU(cs); in kvm_arch_put_registers()
2067 CPUARMState *env = &cpu->env; in kvm_arch_put_registers()
2070 * AArch64 registers before pushing them out to 64-bit KVM. in kvm_arch_put_registers()
2077 ret = kvm_set_one_reg(cs, AARCH64_CORE_REG(regs.regs[i]), in kvm_arch_put_registers()
2078 &env->xregs[i]); in kvm_arch_put_registers()
2089 ret = kvm_set_one_reg(cs, AARCH64_CORE_REG(regs.sp), &env->sp_el[0]); in kvm_arch_put_registers()
2094 ret = kvm_set_one_reg(cs, AARCH64_CORE_REG(sp_el1), &env->sp_el[1]); in kvm_arch_put_registers()
2105 ret = kvm_set_one_reg(cs, AARCH64_CORE_REG(regs.pstate), &val); in kvm_arch_put_registers()
2110 ret = kvm_set_one_reg(cs, AARCH64_CORE_REG(regs.pc), &env->pc); in kvm_arch_put_registers()
2115 ret = kvm_set_one_reg(cs, AARCH64_CORE_REG(elr_el1), &env->elr_el[1]); in kvm_arch_put_registers()
2123 * ensure that any modifications to env->spsr are correctly in kvm_arch_put_registers()
2128 i = bank_number(env->uncached_cpsr & CPSR_M); in kvm_arch_put_registers()
2129 env->banked_spsr[i] = env->spsr; in kvm_arch_put_registers()
2132 /* KVM 0-4 map to QEMU banks 1-5 */ in kvm_arch_put_registers()
2134 ret = kvm_set_one_reg(cs, AARCH64_CORE_REG(spsr[i]), in kvm_arch_put_registers()
2135 &env->banked_spsr[i + 1]); in kvm_arch_put_registers()
2142 ret = kvm_arch_put_sve(cs); in kvm_arch_put_registers()
2144 ret = kvm_arch_put_fpsimd(cs); in kvm_arch_put_registers()
2151 ret = kvm_set_one_reg(cs, AARCH64_SIMD_CTRL_REG(fp_regs.fpsr), &fpr); in kvm_arch_put_registers()
2157 ret = kvm_set_one_reg(cs, AARCH64_SIMD_CTRL_REG(fp_regs.fpcr), &fpr); in kvm_arch_put_registers()
2165 return -EINVAL; in kvm_arch_put_registers()
2181 static int kvm_arch_get_fpsimd(CPUState *cs) in kvm_arch_get_fpsimd() argument
2183 CPUARMState *env = &ARM_CPU(cs)->env; in kvm_arch_get_fpsimd()
2188 ret = kvm_get_one_reg(cs, AARCH64_SIMD_CORE_REG(fp_regs.vregs[i]), q); in kvm_arch_get_fpsimd()
2208 static int kvm_arch_get_sve(CPUState *cs) in kvm_arch_get_sve() argument
2210 ARMCPU *cpu = ARM_CPU(cs); in kvm_arch_get_sve()
2211 CPUARMState *env = &cpu->env; in kvm_arch_get_sve()
2216 r = &env->vfp.zregs[n].d[0]; in kvm_arch_get_sve()
2217 ret = kvm_get_one_reg(cs, KVM_REG_ARM64_SVE_ZREG(n, 0), r); in kvm_arch_get_sve()
2221 sve_bswap64(r, r, cpu->sve_max_vq * 2); in kvm_arch_get_sve()
2225 r = &env->vfp.pregs[n].p[0]; in kvm_arch_get_sve()
2226 ret = kvm_get_one_reg(cs, KVM_REG_ARM64_SVE_PREG(n, 0), r); in kvm_arch_get_sve()
2230 sve_bswap64(r, r, DIV_ROUND_UP(cpu->sve_max_vq * 2, 8)); in kvm_arch_get_sve()
2233 r = &env->vfp.pregs[FFR_PRED_NUM].p[0]; in kvm_arch_get_sve()
2234 ret = kvm_get_one_reg(cs, KVM_REG_ARM64_SVE_FFR(0), r); in kvm_arch_get_sve()
2238 sve_bswap64(r, r, DIV_ROUND_UP(cpu->sve_max_vq * 2, 8)); in kvm_arch_get_sve()
2243 int kvm_arch_get_registers(CPUState *cs, Error **errp) in kvm_arch_get_registers() argument
2250 ARMCPU *cpu = ARM_CPU(cs); in kvm_arch_get_registers()
2251 CPUARMState *env = &cpu->env; in kvm_arch_get_registers()
2254 ret = kvm_get_one_reg(cs, AARCH64_CORE_REG(regs.regs[i]), in kvm_arch_get_registers()
2255 &env->xregs[i]); in kvm_arch_get_registers()
2261 ret = kvm_get_one_reg(cs, AARCH64_CORE_REG(regs.sp), &env->sp_el[0]); in kvm_arch_get_registers()
2266 ret = kvm_get_one_reg(cs, AARCH64_CORE_REG(sp_el1), &env->sp_el[1]); in kvm_arch_get_registers()
2271 ret = kvm_get_one_reg(cs, AARCH64_CORE_REG(regs.pstate), &val); in kvm_arch_get_registers()
2276 env->aarch64 = ((val & PSTATE_nRW) == 0); in kvm_arch_get_registers()
2288 ret = kvm_get_one_reg(cs, AARCH64_CORE_REG(regs.pc), &env->pc); in kvm_arch_get_registers()
2294 * incoming AArch64 regs received from 64-bit KVM. in kvm_arch_get_registers()
2302 ret = kvm_get_one_reg(cs, AARCH64_CORE_REG(elr_el1), &env->elr_el[1]); in kvm_arch_get_registers()
2309 * KVM SPSRs 0-4 map to QEMU banks 1-5 in kvm_arch_get_registers()
2312 ret = kvm_get_one_reg(cs, AARCH64_CORE_REG(spsr[i]), in kvm_arch_get_registers()
2313 &env->banked_spsr[i + 1]); in kvm_arch_get_registers()
2321 i = bank_number(env->uncached_cpsr & CPSR_M); in kvm_arch_get_registers()
2322 env->spsr = env->banked_spsr[i]; in kvm_arch_get_registers()
2326 ret = kvm_arch_get_sve(cs); in kvm_arch_get_registers()
2328 ret = kvm_arch_get_fpsimd(cs); in kvm_arch_get_registers()
2334 ret = kvm_get_one_reg(cs, AARCH64_SIMD_CTRL_REG(fp_regs.fpsr), &fpr); in kvm_arch_get_registers()
2340 ret = kvm_get_one_reg(cs, AARCH64_SIMD_CTRL_REG(fp_regs.fpcr), &fpr); in kvm_arch_get_registers()
2352 return -EINVAL; in kvm_arch_get_registers()
2375 kvm_physical_memory_addr_from_host(c->kvm_state, addr, &paddr)) { in kvm_arch_on_sigbus_vcpu()
2414 int kvm_arch_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp) in kvm_arch_insert_sw_breakpoint() argument
2416 if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 4, 0) || in kvm_arch_insert_sw_breakpoint()
2417 cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&brk_insn, 4, 1)) { in kvm_arch_insert_sw_breakpoint()
2418 return -EINVAL; in kvm_arch_insert_sw_breakpoint()
2423 int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp) in kvm_arch_remove_sw_breakpoint() argument
2427 if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&brk, 4, 0) || in kvm_arch_remove_sw_breakpoint()
2429 cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 4, 1)) { in kvm_arch_remove_sw_breakpoint()
2430 return -EINVAL; in kvm_arch_remove_sw_breakpoint()
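
Software breakpoints are planted by saving the original 4-byte instruction and writing an AArch64 BRK in its place; removal first verifies the BRK is still present before restoring saved_insn, as the fragment above shows. QEMU uses BRK #0, whose encoding is 0xd4200000. A sketch with hypothetical guest_read()/guest_write() helpers standing in for cpu_memory_rw_debug():

#include <stdint.h>

/* AArch64 BRK #0: imm16 = 0 in bits [20:5] of 0xd4200000 | imm16 << 5 */
static const uint32_t brk_insn = 0xd4200000;

/* Hypothetical guest-memory accessors standing in for cpu_memory_rw_debug() */
int guest_read(uint64_t gva, void *buf, int len);
int guest_write(uint64_t gva, const void *buf, int len);

/* Illustrative: insert/remove a software breakpoint at pc. */
static int sw_bp_insert(uint64_t pc, uint32_t *saved_insn)
{
    if (guest_read(pc, saved_insn, 4) || guest_write(pc, &brk_insn, 4)) {
        return -1;
    }
    return 0;
}

static int sw_bp_remove(uint64_t pc, uint32_t saved_insn)
{
    uint32_t cur;

    /* refuse to "restore" if someone else already rewrote the site */
    if (guest_read(pc, &cur, 4) || cur != brk_insn ||
        guest_write(pc, &saved_insn, 4)) {
        return -1;
    }
    return 0;
}
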
2445 * MTE on KVM is enabled on a per-VM basis (and retrying doesn't make in kvm_arm_enable_mte()
2452 error_setg_errno(errp, -ret, "Failed to enable KVM_CAP_ARM_MTE"); in kvm_arm_enable_mte()
2468 cpu->kvm_mte = true; in kvm_arm_enable_mte()
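
The MTE fragment reflects that KVM_CAP_ARM_MTE is a per-VM capability: it must be enabled with KVM_ENABLE_CAP on the VM fd before any vCPU is created, cannot be retried or undone, and QEMU records success in cpu->kvm_mte. The raw enable step looks like:

#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Illustrative: turn on the Memory Tagging Extension for the whole VM.
 * Must run before the first KVM_CREATE_VCPU; there is no way back. */
static int enable_mte(int vmfd)
{
    struct kvm_enable_cap cap;

    memset(&cap, 0, sizeof(cap));
    cap.cap = KVM_CAP_ARM_MTE;
    return ioctl(vmfd, KVM_ENABLE_CAP, &cap);
}
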