/*
 * ARM implementation of KVM hooks
 *
 * Copyright Christoffer Dall 2009-2010
 * Copyright Mian-M. Hamayun 2013, Virtual Open Systems
 * Copyright Alex Bennée 2014, Linaro
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include <sys/ioctl.h>

#include <linux/kvm.h>

#include "qemu/timer.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "qom/object.h"
#include "qapi/error.h"
#include "sysemu/sysemu.h"
#include "sysemu/runstate.h"
#include "sysemu/kvm.h"
#include "sysemu/kvm_int.h"
#include "kvm_arm.h"
#include "cpu.h"
#include "trace.h"
#include "internals.h"
#include "hw/pci/pci.h"
#include "exec/memattrs.h"
#include "exec/address-spaces.h"
#include "gdbstub/enums.h"
#include "hw/boards.h"
#include "hw/irq.h"
#include "qapi/visitor.h"
#include "qemu/log.h"
#include "hw/acpi/acpi.h"
#include "hw/acpi/ghes.h"
#include "target/arm/gtimer.h"
#include "migration/blocker.h"

const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
    KVM_CAP_INFO(DEVICE_CTRL),
    KVM_CAP_LAST_INFO
};

static bool cap_has_mp_state;
static bool cap_has_inject_serror_esr;
static bool cap_has_inject_ext_dabt;

/**
 * ARMHostCPUFeatures: information about the host CPU (identified
 * by asking the host kernel)
 */
typedef struct ARMHostCPUFeatures {
    ARMISARegisters isar;
    uint64_t features;
    uint32_t target;
    const char *dtb_compatible;
} ARMHostCPUFeatures;

static ARMHostCPUFeatures arm_host_cpu_features;

/**
 * kvm_arm_vcpu_init:
 * @cpu: ARMCPU
 *
 * Initialize (or reinitialize) the VCPU by invoking the
 * KVM_ARM_VCPU_INIT ioctl with the CPU type and feature
 * bitmask specified in the CPUState.
 *
 * Returns: 0 if success else < 0 error code
 */
static int kvm_arm_vcpu_init(ARMCPU *cpu)
{
    struct kvm_vcpu_init init;

    init.target = cpu->kvm_target;
    memcpy(init.features, cpu->kvm_init_features, sizeof(init.features));

    return kvm_vcpu_ioctl(CPU(cpu), KVM_ARM_VCPU_INIT, &init);
}

/**
 * kvm_arm_vcpu_finalize:
 * @cpu: ARMCPU
 * @feature: feature to finalize
 *
 * Finalizes the configuration of the specified VCPU feature by
 * invoking the KVM_ARM_VCPU_FINALIZE ioctl. Features requiring
 * this are documented in the "KVM_ARM_VCPU_FINALIZE" section of
 * KVM's API documentation.
 *
 * Returns: 0 if success else < 0 error code
 */
static int kvm_arm_vcpu_finalize(ARMCPU *cpu, int feature)
{
    return kvm_vcpu_ioctl(CPU(cpu), KVM_ARM_VCPU_FINALIZE, &feature);
}

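/*
 * Usage sketch: SVE is the one feature QEMU currently finalizes this
 * way, from the vCPU init path once the vector lengths have been
 * configured, roughly
 *
 *     ret = kvm_arm_vcpu_finalize(cpu, KVM_ARM_VCPU_SVE);
 *
 * Per the KVM API documentation, the KVM_REG_ARM64_SVE registers are
 * inaccessible until finalization, and the configured vector lengths
 * become immutable afterwards.
 */
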
bool kvm_arm_create_scratch_host_vcpu(const uint32_t *cpus_to_try,
                                      int *fdarray,
                                      struct kvm_vcpu_init *init)
{
    int ret = 0, kvmfd = -1, vmfd = -1, cpufd = -1;
    int max_vm_pa_size;

    kvmfd = qemu_open_old("/dev/kvm", O_RDWR);
    if (kvmfd < 0) {
        goto err;
    }
    max_vm_pa_size = ioctl(kvmfd, KVM_CHECK_EXTENSION, KVM_CAP_ARM_VM_IPA_SIZE);
    if (max_vm_pa_size < 0) {
        max_vm_pa_size = 0;
    }
    do {
        vmfd = ioctl(kvmfd, KVM_CREATE_VM, max_vm_pa_size);
    } while (vmfd == -1 && errno == EINTR);
    if (vmfd < 0) {
        goto err;
    }

    /*
     * The MTE capability must be enabled by the VMM before creating
     * any VCPUs in order to allow the MTE bits of the ID_AA64PFR1
     * register to be probed correctly, as they are masked if MTE
     * is not enabled.
     */
    if (kvm_arm_mte_supported()) {
        KVMState kvm_state;

        kvm_state.fd = kvmfd;
        kvm_state.vmfd = vmfd;
        kvm_vm_enable_cap(&kvm_state, KVM_CAP_ARM_MTE, 0);
    }

    cpufd = ioctl(vmfd, KVM_CREATE_VCPU, 0);
    if (cpufd < 0) {
        goto err;
    }

    if (!init) {
        /* Caller doesn't want the VCPU to be initialized, so skip it */
        goto finish;
    }

    if (init->target == -1) {
        struct kvm_vcpu_init preferred;

        ret = ioctl(vmfd, KVM_ARM_PREFERRED_TARGET, &preferred);
        if (!ret) {
            init->target = preferred.target;
        }
    }
    if (ret >= 0) {
        ret = ioctl(cpufd, KVM_ARM_VCPU_INIT, init);
        if (ret < 0) {
            goto err;
        }
    } else if (cpus_to_try) {
        /* Old kernel which doesn't know about the
         * PREFERRED_TARGET ioctl: we know it will only support
         * creating one kind of guest CPU which is its preferred
         * CPU type.
         */
        struct kvm_vcpu_init try;

        while (*cpus_to_try != QEMU_KVM_ARM_TARGET_NONE) {
            try.target = *cpus_to_try++;
            memcpy(try.features, init->features, sizeof(init->features));
            ret = ioctl(cpufd, KVM_ARM_VCPU_INIT, &try);
            if (ret >= 0) {
                break;
            }
        }
        if (ret < 0) {
            goto err;
        }
        init->target = try.target;
    } else {
        /* Treat a NULL cpus_to_try argument the same as an empty
         * list, which means we will fail the call since this must
         * be an old kernel which doesn't support PREFERRED_TARGET.
         */
        goto err;
    }

finish:
    fdarray[0] = kvmfd;
    fdarray[1] = vmfd;
    fdarray[2] = cpufd;

    return true;

err:
    if (cpufd >= 0) {
        close(cpufd);
    }
    if (vmfd >= 0) {
        close(vmfd);
    }
    if (kvmfd >= 0) {
        close(kvmfd);
    }

    return false;
}

void kvm_arm_destroy_scratch_host_vcpu(int *fdarray)
{
    int i;

    for (i = 2; i >= 0; i--) {
        close(fdarray[i]);
    }
}

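/*
 * Helpers to read ID registers from the scratch vCPU fd with
 * KVM_GET_ONE_REG. The kernel exposes all of these registers as
 * 64-bit values, so read_sys_reg32() reads a 64-bit value and
 * narrows it for the 32-bit AArch32 ID registers.
 */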
static int read_sys_reg32(int fd, uint32_t *pret, uint64_t id)
{
    uint64_t ret;
    struct kvm_one_reg idreg = { .id = id, .addr = (uintptr_t)&ret };
    int err;

    assert((id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64);
    err = ioctl(fd, KVM_GET_ONE_REG, &idreg);
    if (err < 0) {
        return -1;
    }
    *pret = ret;
    return 0;
}

static int read_sys_reg64(int fd, uint64_t *pret, uint64_t id)
{
    struct kvm_one_reg idreg = { .id = id, .addr = (uintptr_t)pret };

    assert((id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64);
    return ioctl(fd, KVM_GET_ONE_REG, &idreg);
}

static bool kvm_arm_pauth_supported(void)
{
    return (kvm_check_extension(kvm_state, KVM_CAP_ARM_PTRAUTH_ADDRESS) &&
            kvm_check_extension(kvm_state, KVM_CAP_ARM_PTRAUTH_GENERIC));
}

static bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
{
    /* Identify the feature bits corresponding to the host CPU, and
     * fill out the ARMHostCPUFeatures fields accordingly. To do this
     * we have to create a scratch VM, create a single CPU inside it,
     * and then query that CPU for the relevant ID registers.
     */
    int fdarray[3];
    bool sve_supported;
    bool pmu_supported = false;
    uint64_t features = 0;
    int err;

    /* Old kernels may not know about the PREFERRED_TARGET ioctl: however
     * we know these will only support creating one kind of guest CPU,
     * which is its preferred CPU type. Fortunately these old kernels
     * support only a very limited number of CPUs.
     */
    static const uint32_t cpus_to_try[] = {
        KVM_ARM_TARGET_AEM_V8,
        KVM_ARM_TARGET_FOUNDATION_V8,
        KVM_ARM_TARGET_CORTEX_A57,
        QEMU_KVM_ARM_TARGET_NONE
    };
    /*
     * target = -1 informs kvm_arm_create_scratch_host_vcpu()
     * to use the preferred target
     */
    struct kvm_vcpu_init init = { .target = -1, };

    /*
     * Ask for SVE if supported, so that we can query ID_AA64ZFR0,
     * which is otherwise RAZ.
     */
    sve_supported = kvm_arm_sve_supported();
    if (sve_supported) {
        init.features[0] |= 1 << KVM_ARM_VCPU_SVE;
    }

    /*
     * Ask for Pointer Authentication if supported, so that we get
     * the unsanitized field values for AA64ISAR1_EL1.
     */
    if (kvm_arm_pauth_supported()) {
        init.features[0] |= (1 << KVM_ARM_VCPU_PTRAUTH_ADDRESS |
                             1 << KVM_ARM_VCPU_PTRAUTH_GENERIC);
    }

    if (kvm_arm_pmu_supported()) {
        init.features[0] |= 1 << KVM_ARM_VCPU_PMU_V3;
        pmu_supported = true;
        features |= 1ULL << ARM_FEATURE_PMU;
    }

    if (!kvm_arm_create_scratch_host_vcpu(cpus_to_try, fdarray, &init)) {
        return false;
    }

    ahcf->target = init.target;
    ahcf->dtb_compatible = "arm,arm-v8";

    err = read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64pfr0,
                         ARM64_SYS_REG(3, 0, 0, 4, 0));
    if (unlikely(err < 0)) {
        /*
         * Before v4.15, the kernel only exposed a limited number of system
         * registers, not including any of the interesting AArch64 ID regs.
         * For the most part we could leave these fields as zero with minimal
         * effect, since this does not affect the values seen by the guest.
         *
         * However, it could cause problems down the line for QEMU,
         * so provide a minimal v8.0 default.
         *
         * ??? Could read MIDR and use knowledge from cpu64.c.
         * ??? Could map a page of memory into our temp guest and
         *     run the tiniest of hand-crafted kernels to extract
         *     the values seen by the guest.
         * ??? Either of these sounds like too much effort just
         *     to work around running a modern host kernel.
         */
        ahcf->isar.id_aa64pfr0 = 0x00000011; /* EL1&0, AArch64 only */
        err = 0;
    } else {
        err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64pfr1,
                              ARM64_SYS_REG(3, 0, 0, 4, 1));
        err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64smfr0,
                              ARM64_SYS_REG(3, 0, 0, 4, 5));
        err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64dfr0,
                              ARM64_SYS_REG(3, 0, 0, 5, 0));
        err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64dfr1,
                              ARM64_SYS_REG(3, 0, 0, 5, 1));
        err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64isar0,
                              ARM64_SYS_REG(3, 0, 0, 6, 0));
        err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64isar1,
                              ARM64_SYS_REG(3, 0, 0, 6, 1));
        err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64isar2,
                              ARM64_SYS_REG(3, 0, 0, 6, 2));
        err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64mmfr0,
                              ARM64_SYS_REG(3, 0, 0, 7, 0));
        err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64mmfr1,
                              ARM64_SYS_REG(3, 0, 0, 7, 1));
        err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64mmfr2,
                              ARM64_SYS_REG(3, 0, 0, 7, 2));
        err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64mmfr3,
                              ARM64_SYS_REG(3, 0, 0, 7, 3));

        /*
         * Note that if AArch32 support is not present in the host,
         * the AArch32 sysregs are present to be read, but will
         * return UNKNOWN values. This is neither better nor worse
         * than skipping the reads and leaving 0, as we must avoid
         * considering the values in every case.
         */
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_pfr0,
                              ARM64_SYS_REG(3, 0, 0, 1, 0));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_pfr1,
                              ARM64_SYS_REG(3, 0, 0, 1, 1));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_dfr0,
                              ARM64_SYS_REG(3, 0, 0, 1, 2));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr0,
                              ARM64_SYS_REG(3, 0, 0, 1, 4));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr1,
                              ARM64_SYS_REG(3, 0, 0, 1, 5));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr2,
                              ARM64_SYS_REG(3, 0, 0, 1, 6));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr3,
                              ARM64_SYS_REG(3, 0, 0, 1, 7));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar0,
                              ARM64_SYS_REG(3, 0, 0, 2, 0));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar1,
                              ARM64_SYS_REG(3, 0, 0, 2, 1));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar2,
                              ARM64_SYS_REG(3, 0, 0, 2, 2));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar3,
                              ARM64_SYS_REG(3, 0, 0, 2, 3));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar4,
                              ARM64_SYS_REG(3, 0, 0, 2, 4));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar5,
                              ARM64_SYS_REG(3, 0, 0, 2, 5));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr4,
                              ARM64_SYS_REG(3, 0, 0, 2, 6));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar6,
                              ARM64_SYS_REG(3, 0, 0, 2, 7));

        err |= read_sys_reg32(fdarray[2], &ahcf->isar.mvfr0,
                              ARM64_SYS_REG(3, 0, 0, 3, 0));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.mvfr1,
                              ARM64_SYS_REG(3, 0, 0, 3, 1));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.mvfr2,
                              ARM64_SYS_REG(3, 0, 0, 3, 2));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_pfr2,
                              ARM64_SYS_REG(3, 0, 0, 3, 4));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_dfr1,
                              ARM64_SYS_REG(3, 0, 0, 3, 5));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr5,
                              ARM64_SYS_REG(3, 0, 0, 3, 6));

        /*
         * DBGDIDR is a bit complicated because the kernel doesn't
         * provide an accessor for it in 64-bit mode, which is what this
         * scratch VM is in, and there's no architected "64-bit sysreg
         * which reads the same as the 32-bit register" the way there is
         * for other ID registers. Instead we synthesize a value from the
         * AArch64 ID_AA64DFR0, the same way the kernel code in
         * arch/arm64/kvm/sys_regs.c:trap_dbgidr() does.
         * We only do this if the CPU supports AArch32 at EL1.
         */
        if (FIELD_EX32(ahcf->isar.id_aa64pfr0, ID_AA64PFR0, EL1) >= 2) {
            int wrps = FIELD_EX64(ahcf->isar.id_aa64dfr0, ID_AA64DFR0, WRPS);
            int brps = FIELD_EX64(ahcf->isar.id_aa64dfr0, ID_AA64DFR0, BRPS);
            int ctx_cmps =
                FIELD_EX64(ahcf->isar.id_aa64dfr0, ID_AA64DFR0, CTX_CMPS);
            int version = 6; /* ARMv8 debug architecture */
            bool has_el3 =
                !!FIELD_EX32(ahcf->isar.id_aa64pfr0, ID_AA64PFR0, EL3);
            uint32_t dbgdidr = 0;

            dbgdidr = FIELD_DP32(dbgdidr, DBGDIDR, WRPS, wrps);
            dbgdidr = FIELD_DP32(dbgdidr, DBGDIDR, BRPS, brps);
            dbgdidr = FIELD_DP32(dbgdidr, DBGDIDR, CTX_CMPS, ctx_cmps);
            dbgdidr = FIELD_DP32(dbgdidr, DBGDIDR, VERSION, version);
            dbgdidr = FIELD_DP32(dbgdidr, DBGDIDR, NSUHD_IMP, has_el3);
            dbgdidr = FIELD_DP32(dbgdidr, DBGDIDR, SE_IMP, has_el3);
            dbgdidr |= (1 << 15); /* RES1 bit */
            ahcf->isar.dbgdidr = dbgdidr;
        }

        if (pmu_supported) {
            /* PMCR_EL0 is only accessible if the vCPU has feature PMU_V3 */
            err |= read_sys_reg64(fdarray[2], &ahcf->isar.reset_pmcr_el0,
                                  ARM64_SYS_REG(3, 3, 9, 12, 0));
        }

        if (sve_supported) {
            /*
             * There is a range of kernels between commits 73433762fcae
             * and f81cb2c3ad41 which have a bug where the kernel doesn't
             * expose SYS_ID_AA64ZFR0_EL1 via the ONE_REG API unless the
             * VM has enabled SVE support, which resulted in an error
             * rather than RAZ. So only read the register if we set
             * KVM_ARM_VCPU_SVE above.
             */
            err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64zfr0,
                                  ARM64_SYS_REG(3, 0, 0, 4, 4));
        }
    }

    kvm_arm_destroy_scratch_host_vcpu(fdarray);

    if (err < 0) {
        return false;
    }

    /*
     * We can assume any KVM-supporting CPU is at least a v8
     * with VFPv4+Neon; this in turn implies most of the other
     * feature bits.
     */
    features |= 1ULL << ARM_FEATURE_V8;
    features |= 1ULL << ARM_FEATURE_NEON;
    features |= 1ULL << ARM_FEATURE_AARCH64;
    features |= 1ULL << ARM_FEATURE_GENERIC_TIMER;

    ahcf->features = features;

    return true;
}

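/*
 * Probe the host CPU once and cache the result in arm_host_cpu_features;
 * every subsequent "-cpu host" vCPU reuses the cached copy. On probe
 * failure we can only set a flag here; the error itself is reported
 * later, from arm_cpu_realizefn().
 */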
void kvm_arm_set_cpu_features_from_host(ARMCPU *cpu)
{
    CPUARMState *env = &cpu->env;

    if (!arm_host_cpu_features.dtb_compatible) {
        if (!kvm_enabled() ||
            !kvm_arm_get_host_cpu_features(&arm_host_cpu_features)) {
            /* We can't report this error yet, so flag that we need to
             * report it in arm_cpu_realizefn().
             */
            cpu->kvm_target = QEMU_KVM_ARM_TARGET_NONE;
            cpu->host_cpu_probe_failed = true;
            return;
        }
    }

    cpu->kvm_target = arm_host_cpu_features.target;
    cpu->dtb_compatible = arm_host_cpu_features.dtb_compatible;
    cpu->isar = arm_host_cpu_features.isar;
    env->features = arm_host_cpu_features.features;
}

static bool kvm_no_adjvtime_get(Object *obj, Error **errp)
{
    return !ARM_CPU(obj)->kvm_adjvtime;
}

static void kvm_no_adjvtime_set(Object *obj, bool value, Error **errp)
{
    ARM_CPU(obj)->kvm_adjvtime = !value;
}

static bool kvm_steal_time_get(Object *obj, Error **errp)
{
    return ARM_CPU(obj)->kvm_steal_time != ON_OFF_AUTO_OFF;
}

static void kvm_steal_time_set(Object *obj, bool value, Error **errp)
{
    ARM_CPU(obj)->kvm_steal_time = value ? ON_OFF_AUTO_ON : ON_OFF_AUTO_OFF;
}

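/*
 * The accessors above deliberately invert or collapse the internal
 * state: "kvm-no-adjvtime" is the negation of cpu->kvm_adjvtime, and
 * the boolean "kvm-steal-time" is folded into an on/off/auto tristate
 * whose AUTO default is resolved when the CPU's features are finalized.
 */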
/* KVM VCPU properties should be prefixed with "kvm-". */
void kvm_arm_add_vcpu_properties(ARMCPU *cpu)
{
    CPUARMState *env = &cpu->env;
    Object *obj = OBJECT(cpu);

    if (arm_feature(env, ARM_FEATURE_GENERIC_TIMER)) {
        cpu->kvm_adjvtime = true;
        object_property_add_bool(obj, "kvm-no-adjvtime", kvm_no_adjvtime_get,
                                 kvm_no_adjvtime_set);
        object_property_set_description(obj, "kvm-no-adjvtime",
                                        "Set on to disable the adjustment of "
                                        "the virtual counter. VM stopped time "
                                        "will be counted.");
    }

    cpu->kvm_steal_time = ON_OFF_AUTO_AUTO;
    object_property_add_bool(obj, "kvm-steal-time", kvm_steal_time_get,
                             kvm_steal_time_set);
    object_property_set_description(obj, "kvm-steal-time",
                                    "Set off to disable KVM steal time.");
}

bool kvm_arm_pmu_supported(void)
{
    return kvm_check_extension(kvm_state, KVM_CAP_ARM_PMU_V3);
}

int kvm_arm_get_max_vm_ipa_size(MachineState *ms, bool *fixed_ipa)
{
    KVMState *s = KVM_STATE(ms->accelerator);
    int ret;

    ret = kvm_check_extension(s, KVM_CAP_ARM_VM_IPA_SIZE);
    *fixed_ipa = ret <= 0;

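    /* Without the capability, the IPA size is fixed at the legacy 40 bits */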
    return ret > 0 ? ret : 40;
}

int kvm_arch_get_default_type(MachineState *ms)
{
    bool fixed_ipa;
    int size = kvm_arm_get_max_vm_ipa_size(ms, &fixed_ipa);
    return fixed_ipa ? 0 : size;
}

int kvm_arch_init(MachineState *ms, KVMState *s)
{
    int ret = 0;
    /* For ARM, interrupt delivery is always asynchronous,
     * whether we are using an in-kernel VGIC or not.
     */
    kvm_async_interrupts_allowed = true;

    /*
     * PSCI wakes up secondary cores, so we always need to
     * have vCPUs waiting in kernel space
     */
    kvm_halt_in_kernel_allowed = true;

    cap_has_mp_state = kvm_check_extension(s, KVM_CAP_MP_STATE);

    /* Check whether user space can specify guest syndrome value */
    cap_has_inject_serror_esr =
        kvm_check_extension(s, KVM_CAP_ARM_INJECT_SERROR_ESR);

    if (ms->smp.cpus > 256 &&
        !kvm_check_extension(s, KVM_CAP_ARM_IRQ_LINE_LAYOUT_2)) {
        error_report("Using more than 256 vcpus requires a host kernel "
                     "with KVM_CAP_ARM_IRQ_LINE_LAYOUT_2");
        ret = -EINVAL;
    }

    if (kvm_check_extension(s, KVM_CAP_ARM_NISV_TO_USER)) {
        if (kvm_vm_enable_cap(s, KVM_CAP_ARM_NISV_TO_USER, 0)) {
            error_report("Failed to enable KVM_CAP_ARM_NISV_TO_USER cap");
        } else {
            /* Set status for supporting the external dabt injection */
            cap_has_inject_ext_dabt = kvm_check_extension(s,
                                          KVM_CAP_ARM_INJECT_EXT_DABT);
        }
    }

    if (s->kvm_eager_split_size) {
        uint32_t sizes;

        sizes = kvm_vm_check_extension(s, KVM_CAP_ARM_SUPPORTED_BLOCK_SIZES);
        if (!sizes) {
            s->kvm_eager_split_size = 0;
            warn_report("Eager Page Split support not available");
        } else if (!(s->kvm_eager_split_size & sizes)) {
            error_report("Eager Page Split requested chunk size not valid");
            ret = -EINVAL;
        } else {
            ret = kvm_vm_enable_cap(s, KVM_CAP_ARM_EAGER_SPLIT_CHUNK_SIZE, 0,
                                    s->kvm_eager_split_size);
            if (ret < 0) {
                error_report("Enabling of Eager Page Split failed: %s",
                             strerror(-ret));
            }
        }
    }

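    /*
     * Size the hardware breakpoint/watchpoint arrays to whatever the
     * host kernel advertises; the debug code fills them in on demand.
     */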
    max_hw_wps = kvm_check_extension(s, KVM_CAP_GUEST_DEBUG_HW_WPS);
    hw_watchpoints = g_array_sized_new(true, true,
                                       sizeof(HWWatchpoint), max_hw_wps);

    max_hw_bps = kvm_check_extension(s, KVM_CAP_GUEST_DEBUG_HW_BPS);
    hw_breakpoints = g_array_sized_new(true, true,
                                       sizeof(HWBreakpoint), max_hw_bps);

    return ret;
}

unsigned long kvm_arch_vcpu_id(CPUState *cpu)
{
    return cpu->cpu_index;
}

/* We track all the KVM devices which need their memory addresses
 * passed to the kernel in a list of these structures.
 * When board init is complete we run through the list and
 * tell the kernel the base addresses of the memory regions.
 * We use a MemoryListener to track mapping and unmapping of
 * the regions during board creation, so the board models don't
 * need to do anything special for the KVM case.
 *
 * Sometimes the address must be OR'ed with some other fields
 * (for example for KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION).
 * @kda_addr_ormask aims at storing the value of those fields.
 */
typedef struct KVMDevice {
    struct kvm_arm_device_addr kda;
    struct kvm_device_attr kdattr;
    uint64_t kda_addr_ormask;
    MemoryRegion *mr;
    QSLIST_ENTRY(KVMDevice) entries;
    int dev_fd;
} KVMDevice;

static QSLIST_HEAD(, KVMDevice) kvm_devices_head;

static void kvm_arm_devlistener_add(MemoryListener *listener,
                                    MemoryRegionSection *section)
{
    KVMDevice *kd;

    QSLIST_FOREACH(kd, &kvm_devices_head, entries) {
        if (section->mr == kd->mr) {
            kd->kda.addr = section->offset_within_address_space;
        }
    }
}

static void kvm_arm_devlistener_del(MemoryListener *listener,
                                    MemoryRegionSection *section)
{
    KVMDevice *kd;

    QSLIST_FOREACH(kd, &kvm_devices_head, entries) {
        if (section->mr == kd->mr) {
            kd->kda.addr = -1;
        }
    }
}

static MemoryListener devlistener = {
    .name = "kvm-arm",
    .region_add = kvm_arm_devlistener_add,
    .region_del = kvm_arm_devlistener_del,
    .priority = MEMORY_LISTENER_PRIORITY_MIN,
};

static void kvm_arm_set_device_addr(KVMDevice *kd)
{
    struct kvm_device_attr *attr = &kd->kdattr;
    int ret;
    uint64_t addr = kd->kda.addr;

    addr |= kd->kda_addr_ormask;
    attr->addr = (uintptr_t)&addr;
    ret = kvm_device_ioctl(kd->dev_fd, KVM_SET_DEVICE_ATTR, attr);

    if (ret < 0) {
        fprintf(stderr, "Failed to set device address: %s\n",
                strerror(-ret));
        abort();
    }
}

static void kvm_arm_machine_init_done(Notifier *notifier, void *data)
{
    KVMDevice *kd, *tkd;

    QSLIST_FOREACH_SAFE(kd, &kvm_devices_head, entries, tkd) {
        if (kd->kda.addr != -1) {
            kvm_arm_set_device_addr(kd);
        }
        memory_region_unref(kd->mr);
        QSLIST_REMOVE_HEAD(&kvm_devices_head, entries);
        g_free(kd);
    }
    memory_listener_unregister(&devlistener);
}

static Notifier notify = {
    .notify = kvm_arm_machine_init_done,
};

void kvm_arm_register_device(MemoryRegion *mr, uint64_t devid, uint64_t group,
                             uint64_t attr, int dev_fd, uint64_t addr_ormask)
{
    KVMDevice *kd;

    if (!kvm_irqchip_in_kernel()) {
        return;
    }

    if (QSLIST_EMPTY(&kvm_devices_head)) {
        memory_listener_register(&devlistener, &address_space_memory);
        qemu_add_machine_init_done_notifier(&notify);
    }
    kd = g_new0(KVMDevice, 1);
    kd->mr = mr;
    kd->kda.id = devid;
    kd->kda.addr = -1;
    kd->kdattr.flags = 0;
    kd->kdattr.group = group;
    kd->kdattr.attr = attr;
    kd->dev_fd = dev_fd;
    kd->kda_addr_ormask = addr_ormask;
    QSLIST_INSERT_HEAD(&kvm_devices_head, kd, entries);
    memory_region_ref(kd->mr);
}
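
/*
 * Usage sketch (field names illustrative, simplified from the in-kernel
 * VGIC models): a device registers its region and device fd once at
 * creation time, roughly
 *
 *     kvm_arm_register_device(&s->iomem_dist, -1,
 *                             KVM_DEV_ARM_VGIC_GRP_ADDR,
 *                             KVM_VGIC_V2_ADDR_TYPE_DIST, s->dev_fd, 0);
 *
 * The kernel then learns the final base address from the machine init
 * done notifier above, once the board has mapped the region.
 */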

static int compare_u64(const void *a, const void *b)
{
    if (*(uint64_t *)a > *(uint64_t *)b) {
        return 1;
    }
    if (*(uint64_t *)a < *(uint64_t *)b) {
        return -1;
    }
    return 0;
}

/*
 * cpreg_values are sorted in ascending order by KVM register ID
 * (see kvm_arm_init_cpreg_list). This allows us to cheaply find
 * the storage for a KVM register by ID with a binary search.
 */
static uint64_t *kvm_arm_get_cpreg_ptr(ARMCPU *cpu, uint64_t regidx)
{
    uint64_t *res;

    res = bsearch(&regidx, cpu->cpreg_indexes, cpu->cpreg_array_len,
                  sizeof(uint64_t), compare_u64);
    assert(res);

    return &cpu->cpreg_values[res - cpu->cpreg_indexes];
}

/**
 * kvm_arm_reg_syncs_via_cpreg_list:
 * @regidx: KVM register index
 *
 * Return true if this KVM register should be synchronized via the
 * cpreg list of arbitrary system registers, false if it is synchronized
 * by hand using code in kvm_arch_get/put_registers().
 */
static bool kvm_arm_reg_syncs_via_cpreg_list(uint64_t regidx)
{
    switch (regidx & KVM_REG_ARM_COPROC_MASK) {
    case KVM_REG_ARM_CORE:
    case KVM_REG_ARM64_SVE:
        return false;
    default:
        return true;
    }
}

/**
 * kvm_arm_init_cpreg_list:
 * @cpu: ARMCPU
 *
 * Initialize the ARMCPU cpreg list according to the kernel's
 * definition of what CPU registers it knows about (and throw away
 * the previous TCG-created cpreg list).
 *
 * Returns: 0 if success, else < 0 error code
 */
static int kvm_arm_init_cpreg_list(ARMCPU *cpu)
{
    struct kvm_reg_list rl;
    struct kvm_reg_list *rlp;
    int i, ret, arraylen;
    CPUState *cs = CPU(cpu);

    rl.n = 0;
    ret = kvm_vcpu_ioctl(cs, KVM_GET_REG_LIST, &rl);
    if (ret != -E2BIG) {
        return ret;
    }
    rlp = g_malloc(sizeof(struct kvm_reg_list) + rl.n * sizeof(uint64_t));
    rlp->n = rl.n;
    ret = kvm_vcpu_ioctl(cs, KVM_GET_REG_LIST, rlp);
    if (ret) {
        goto out;
    }
    /* Sort the list we get back from the kernel, since cpreg_tuples
     * must be in strictly ascending order.
     */
    qsort(&rlp->reg, rlp->n, sizeof(rlp->reg[0]), compare_u64);

    for (i = 0, arraylen = 0; i < rlp->n; i++) {
        if (!kvm_arm_reg_syncs_via_cpreg_list(rlp->reg[i])) {
            continue;
        }
        switch (rlp->reg[i] & KVM_REG_SIZE_MASK) {
        case KVM_REG_SIZE_U32:
        case KVM_REG_SIZE_U64:
            break;
        default:
            fprintf(stderr, "Can't handle size of register in kernel list\n");
            ret = -EINVAL;
            goto out;
        }

        arraylen++;
    }

    cpu->cpreg_indexes = g_renew(uint64_t, cpu->cpreg_indexes, arraylen);
    cpu->cpreg_values = g_renew(uint64_t, cpu->cpreg_values, arraylen);
    cpu->cpreg_vmstate_indexes = g_renew(uint64_t, cpu->cpreg_vmstate_indexes,
                                         arraylen);
    cpu->cpreg_vmstate_values = g_renew(uint64_t, cpu->cpreg_vmstate_values,
                                        arraylen);
    cpu->cpreg_array_len = arraylen;
    cpu->cpreg_vmstate_array_len = arraylen;

    for (i = 0, arraylen = 0; i < rlp->n; i++) {
        uint64_t regidx = rlp->reg[i];
        if (!kvm_arm_reg_syncs_via_cpreg_list(regidx)) {
            continue;
        }
        cpu->cpreg_indexes[arraylen] = regidx;
        arraylen++;
    }
    assert(cpu->cpreg_array_len == arraylen);

    if (!write_kvmstate_to_list(cpu)) {
        /* Shouldn't happen unless kernel is inconsistent about
         * what registers exist.
         */
        fprintf(stderr, "Initial read of kernel register state failed\n");
        ret = -EINVAL;
        goto out;
    }

out:
    g_free(rlp);
    return ret;
}

/**
 * kvm_arm_cpreg_level:
 * @regidx: KVM register index
 *
 * Return the level of this coprocessor/system register. Return value is
 * either KVM_PUT_RUNTIME_STATE, KVM_PUT_RESET_STATE, or KVM_PUT_FULL_STATE.
 */
static int kvm_arm_cpreg_level(uint64_t regidx)
{
    /*
     * All system registers are assumed to be level KVM_PUT_RUNTIME_STATE.
     * If a register should be written less often, you must add it here
     * with a state of either KVM_PUT_RESET_STATE or KVM_PUT_FULL_STATE.
     */
    switch (regidx) {
    case KVM_REG_ARM_TIMER_CNT:
    case KVM_REG_ARM_PTIMER_CNT:
        return KVM_PUT_FULL_STATE;
    }
    return KVM_PUT_RUNTIME_STATE;
}

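/*
 * write_kvmstate_to_list:
 * Read every register in cpreg_indexes back from the kernel with
 * KVM_GET_ONE_REG and stash the values in cpreg_values. Returns true
 * only if every read succeeded.
 */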
bool write_kvmstate_to_list(ARMCPU *cpu)
{
    CPUState *cs = CPU(cpu);
    int i;
    bool ok = true;

    for (i = 0; i < cpu->cpreg_array_len; i++) {
        uint64_t regidx = cpu->cpreg_indexes[i];
        uint32_t v32;
        int ret;

        switch (regidx & KVM_REG_SIZE_MASK) {
        case KVM_REG_SIZE_U32:
            ret = kvm_get_one_reg(cs, regidx, &v32);
            if (!ret) {
                cpu->cpreg_values[i] = v32;
            }
            break;
        case KVM_REG_SIZE_U64:
            ret = kvm_get_one_reg(cs, regidx, cpu->cpreg_values + i);
            break;
        default:
            g_assert_not_reached();
        }
        if (ret) {
            ok = false;
        }
    }
    return ok;
}

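/*
 * write_list_to_kvmstate:
 * Push the values in cpreg_values back into the kernel with
 * KVM_SET_ONE_REG, skipping any register whose kvm_arm_cpreg_level()
 * is above @level. Returns true only if every write succeeded.
 */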
bool write_list_to_kvmstate(ARMCPU *cpu, int level)
{
    CPUState *cs = CPU(cpu);
    int i;
    bool ok = true;

    for (i = 0; i < cpu->cpreg_array_len; i++) {
        uint64_t regidx = cpu->cpreg_indexes[i];
        uint32_t v32;
        int ret;

        if (kvm_arm_cpreg_level(regidx) > level) {
            continue;
        }

        switch (regidx & KVM_REG_SIZE_MASK) {
        case KVM_REG_SIZE_U32:
            v32 = cpu->cpreg_values[i];
            ret = kvm_set_one_reg(cs, regidx, &v32);
            break;
        case KVM_REG_SIZE_U64:
            ret = kvm_set_one_reg(cs, regidx, cpu->cpreg_values + i);
            break;
        default:
            g_assert_not_reached();
        }
        if (ret) {
            /* We might fail for "unknown register" and also for
             * "you tried to set a register which is constant with
             * a different value from what it actually contains".
             */
            ok = false;
        }
    }
    return ok;
}

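/*
 * Migration hooks for the virtual counter: pre_save folds a pending
 * kvm_vtime value into the cpreg list so that it is migrated, and
 * post_load pulls it back out so the counter can be restored before
 * the VM next runs.
 */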
kvm_arm_cpu_pre_save(ARMCPU * cpu)972e5ac4200SAndrew Jones void kvm_arm_cpu_pre_save(ARMCPU *cpu)
973e5ac4200SAndrew Jones {
974e5ac4200SAndrew Jones /* KVM virtual time adjustment */
975e5ac4200SAndrew Jones if (cpu->kvm_vtime_dirty) {
976e5ac4200SAndrew Jones *kvm_arm_get_cpreg_ptr(cpu, KVM_REG_ARM_TIMER_CNT) = cpu->kvm_vtime;
977e5ac4200SAndrew Jones }
978e5ac4200SAndrew Jones }
979e5ac4200SAndrew Jones
kvm_arm_cpu_post_load(ARMCPU * cpu)980e5ac4200SAndrew Jones void kvm_arm_cpu_post_load(ARMCPU *cpu)
981e5ac4200SAndrew Jones {
982e5ac4200SAndrew Jones /* KVM virtual time adjustment */
983e5ac4200SAndrew Jones if (cpu->kvm_adjvtime) {
984e5ac4200SAndrew Jones cpu->kvm_vtime = *kvm_arm_get_cpreg_ptr(cpu, KVM_REG_ARM_TIMER_CNT);
985e5ac4200SAndrew Jones cpu->kvm_vtime_dirty = true;
986e5ac4200SAndrew Jones }
987e5ac4200SAndrew Jones }
988e5ac4200SAndrew Jones
kvm_arm_reset_vcpu(ARMCPU * cpu)989fcf5ef2aSThomas Huth void kvm_arm_reset_vcpu(ARMCPU *cpu)
990fcf5ef2aSThomas Huth {
991fcf5ef2aSThomas Huth int ret;
992fcf5ef2aSThomas Huth
993fcf5ef2aSThomas Huth /* Re-init VCPU so that all registers are set to
994fcf5ef2aSThomas Huth * their respective reset values.
995fcf5ef2aSThomas Huth */
996bbb22d58SPhilippe Mathieu-Daudé ret = kvm_arm_vcpu_init(cpu);
997fcf5ef2aSThomas Huth if (ret < 0) {
998fcf5ef2aSThomas Huth fprintf(stderr, "kvm_arm_vcpu_init failed: %s\n", strerror(-ret));
999fcf5ef2aSThomas Huth abort();
1000fcf5ef2aSThomas Huth }
1001fcf5ef2aSThomas Huth if (!write_kvmstate_to_list(cpu)) {
1002fcf5ef2aSThomas Huth fprintf(stderr, "write_kvmstate_to_list failed\n");
1003fcf5ef2aSThomas Huth abort();
1004fcf5ef2aSThomas Huth }
1005b698e4eeSPeter Maydell /*
1006b698e4eeSPeter Maydell * Sync the reset values also into the CPUState. This is necessary
1007b698e4eeSPeter Maydell * because the next thing we do will be a kvm_arch_put_registers()
1008b698e4eeSPeter Maydell * which will update the list values from the CPUState before copying
1009b698e4eeSPeter Maydell * the list values back to KVM. It's OK to ignore failure returns here
1010b698e4eeSPeter Maydell * for the same reason we do so in kvm_arch_get_registers().
1011b698e4eeSPeter Maydell */
1012b698e4eeSPeter Maydell write_list_to_cpustate(cpu);
1013fcf5ef2aSThomas Huth }
1014fcf5ef2aSThomas Huth
1015fcf5ef2aSThomas Huth /*
1016fcf5ef2aSThomas Huth * Update KVM's MP_STATE based on what QEMU thinks it is
1017fcf5ef2aSThomas Huth */
kvm_arm_sync_mpstate_to_kvm(ARMCPU * cpu)101871c34911SRichard Henderson static int kvm_arm_sync_mpstate_to_kvm(ARMCPU *cpu)
1019fcf5ef2aSThomas Huth {
1020fcf5ef2aSThomas Huth if (cap_has_mp_state) {
1021fcf5ef2aSThomas Huth struct kvm_mp_state mp_state = {
1022062ba099SAlex Bennée .mp_state = (cpu->power_state == PSCI_OFF) ?
1023062ba099SAlex Bennée KVM_MP_STATE_STOPPED : KVM_MP_STATE_RUNNABLE
1024fcf5ef2aSThomas Huth };
102571c34911SRichard Henderson return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MP_STATE, &mp_state);
1026fcf5ef2aSThomas Huth }
1027fcf5ef2aSThomas Huth return 0;
1028fcf5ef2aSThomas Huth }
1029fcf5ef2aSThomas Huth
1030fcf5ef2aSThomas Huth /*
1031fcf5ef2aSThomas Huth * Sync the KVM MP_STATE into QEMU
1032fcf5ef2aSThomas Huth */
kvm_arm_sync_mpstate_to_qemu(ARMCPU * cpu)103371c34911SRichard Henderson static int kvm_arm_sync_mpstate_to_qemu(ARMCPU *cpu)
1034fcf5ef2aSThomas Huth {
1035fcf5ef2aSThomas Huth if (cap_has_mp_state) {
1036fcf5ef2aSThomas Huth struct kvm_mp_state mp_state;
1037fcf5ef2aSThomas Huth int ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_MP_STATE, &mp_state);
1038fcf5ef2aSThomas Huth if (ret) {
103971c34911SRichard Henderson return ret;
1040fcf5ef2aSThomas Huth }
1041062ba099SAlex Bennée cpu->power_state = (mp_state.mp_state == KVM_MP_STATE_STOPPED) ?
1042062ba099SAlex Bennée PSCI_OFF : PSCI_ON;
1043fcf5ef2aSThomas Huth }
1044fcf5ef2aSThomas Huth return 0;
1045fcf5ef2aSThomas Huth }
1046fcf5ef2aSThomas Huth
104746512471SRichard Henderson /**
104846512471SRichard Henderson * kvm_arm_get_virtual_time:
104976acc987SPhilippe Mathieu-Daudé * @cpu: ARMCPU
105046512471SRichard Henderson *
105146512471SRichard Henderson * Gets the VCPU's virtual counter and stores it in the KVM CPU state.
105246512471SRichard Henderson */
105376acc987SPhilippe Mathieu-Daudé static void kvm_arm_get_virtual_time(ARMCPU *cpu)
1054e5ac4200SAndrew Jones {
1055e5ac4200SAndrew Jones int ret;
1056e5ac4200SAndrew Jones
1057e5ac4200SAndrew Jones if (cpu->kvm_vtime_dirty) {
1058e5ac4200SAndrew Jones return;
1059e5ac4200SAndrew Jones }
1060e5ac4200SAndrew Jones
106176acc987SPhilippe Mathieu-Daudé ret = kvm_get_one_reg(CPU(cpu), KVM_REG_ARM_TIMER_CNT, &cpu->kvm_vtime);
1062e5ac4200SAndrew Jones if (ret) {
1063e5ac4200SAndrew Jones error_report("Failed to get KVM_REG_ARM_TIMER_CNT");
1064e5ac4200SAndrew Jones abort();
1065e5ac4200SAndrew Jones }
1066e5ac4200SAndrew Jones
1067e5ac4200SAndrew Jones cpu->kvm_vtime_dirty = true;
1068e5ac4200SAndrew Jones }
1069e5ac4200SAndrew Jones
107046512471SRichard Henderson /**
107146512471SRichard Henderson * kvm_arm_put_virtual_time:
107276acc987SPhilippe Mathieu-Daudé * @cpu: ARMCPU
107346512471SRichard Henderson *
107446512471SRichard Henderson * Sets the VCPU's virtual counter to the value stored in the KVM CPU state.
107546512471SRichard Henderson */
107676acc987SPhilippe Mathieu-Daudé static void kvm_arm_put_virtual_time(ARMCPU *cpu)
1077e5ac4200SAndrew Jones {
1078e5ac4200SAndrew Jones int ret;
1079e5ac4200SAndrew Jones
1080e5ac4200SAndrew Jones if (!cpu->kvm_vtime_dirty) {
1081e5ac4200SAndrew Jones return;
1082e5ac4200SAndrew Jones }
1083e5ac4200SAndrew Jones
108476acc987SPhilippe Mathieu-Daudé ret = kvm_set_one_reg(CPU(cpu), KVM_REG_ARM_TIMER_CNT, &cpu->kvm_vtime);
1085e5ac4200SAndrew Jones if (ret) {
1086e5ac4200SAndrew Jones error_report("Failed to set KVM_REG_ARM_TIMER_CNT");
1087e5ac4200SAndrew Jones abort();
1088e5ac4200SAndrew Jones }
1089e5ac4200SAndrew Jones
1090e5ac4200SAndrew Jones cpu->kvm_vtime_dirty = false;
1091e5ac4200SAndrew Jones }
1092e5ac4200SAndrew Jones
1093353e03cdSRichard Henderson /**
1094353e03cdSRichard Henderson * kvm_put_vcpu_events:
1095353e03cdSRichard Henderson * @cpu: ARMCPU
1096353e03cdSRichard Henderson *
1097353e03cdSRichard Henderson * Put VCPU related state to kvm.
1098353e03cdSRichard Henderson *
1099353e03cdSRichard Henderson * Returns: 0 if success else < 0 error code
1100353e03cdSRichard Henderson */
1101353e03cdSRichard Henderson static int kvm_put_vcpu_events(ARMCPU *cpu)
1102202ccb6bSDongjiu Geng {
1103202ccb6bSDongjiu Geng CPUARMState *env = &cpu->env;
1104202ccb6bSDongjiu Geng struct kvm_vcpu_events events;
1105202ccb6bSDongjiu Geng int ret;
1106202ccb6bSDongjiu Geng
1107202ccb6bSDongjiu Geng if (!kvm_has_vcpu_events()) {
1108202ccb6bSDongjiu Geng return 0;
1109202ccb6bSDongjiu Geng }
1110202ccb6bSDongjiu Geng
1111202ccb6bSDongjiu Geng memset(&events, 0, sizeof(events));
1112202ccb6bSDongjiu Geng events.exception.serror_pending = env->serror.pending;
1113202ccb6bSDongjiu Geng
1114202ccb6bSDongjiu Geng /* Inject an SError into the guest with the specified syndrome if the
1115202ccb6bSDongjiu Geng * host kernel supports it; otherwise inject an SError without a syndrome.
1116202ccb6bSDongjiu Geng */
1117202ccb6bSDongjiu Geng if (cap_has_inject_serror_esr) {
1118202ccb6bSDongjiu Geng events.exception.serror_has_esr = env->serror.has_esr;
1119202ccb6bSDongjiu Geng events.exception.serror_esr = env->serror.esr;
1120202ccb6bSDongjiu Geng }
1121202ccb6bSDongjiu Geng
1122202ccb6bSDongjiu Geng ret = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_VCPU_EVENTS, &events);
1123202ccb6bSDongjiu Geng if (ret) {
1124202ccb6bSDongjiu Geng error_report("failed to put vcpu events");
1125202ccb6bSDongjiu Geng }
1126202ccb6bSDongjiu Geng
1127202ccb6bSDongjiu Geng return ret;
1128202ccb6bSDongjiu Geng }
1129202ccb6bSDongjiu Geng
1130353e03cdSRichard Henderson /**
1131353e03cdSRichard Henderson * kvm_get_vcpu_events:
1132353e03cdSRichard Henderson * @cpu: ARMCPU
1133353e03cdSRichard Henderson *
1134353e03cdSRichard Henderson * Get VCPU related state from kvm.
1135353e03cdSRichard Henderson *
1136353e03cdSRichard Henderson * Returns: 0 if success else < 0 error code
1137353e03cdSRichard Henderson */
1138353e03cdSRichard Henderson static int kvm_get_vcpu_events(ARMCPU *cpu)
1139202ccb6bSDongjiu Geng {
1140202ccb6bSDongjiu Geng CPUARMState *env = &cpu->env;
1141202ccb6bSDongjiu Geng struct kvm_vcpu_events events;
1142202ccb6bSDongjiu Geng int ret;
1143202ccb6bSDongjiu Geng
1144202ccb6bSDongjiu Geng if (!kvm_has_vcpu_events()) {
1145202ccb6bSDongjiu Geng return 0;
1146202ccb6bSDongjiu Geng }
1147202ccb6bSDongjiu Geng
1148202ccb6bSDongjiu Geng memset(&events, 0, sizeof(events));
1149202ccb6bSDongjiu Geng ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_VCPU_EVENTS, &events);
1150202ccb6bSDongjiu Geng if (ret) {
1151202ccb6bSDongjiu Geng error_report("failed to get vcpu events");
1152202ccb6bSDongjiu Geng return ret;
1153202ccb6bSDongjiu Geng }
1154202ccb6bSDongjiu Geng
1155202ccb6bSDongjiu Geng env->serror.pending = events.exception.serror_pending;
1156202ccb6bSDongjiu Geng env->serror.has_esr = events.exception.serror_has_esr;
1157202ccb6bSDongjiu Geng env->serror.esr = events.exception.serror_esr;
1158202ccb6bSDongjiu Geng
1159202ccb6bSDongjiu Geng return 0;
1160202ccb6bSDongjiu Geng }
1161202ccb6bSDongjiu Geng
116220c83dc9SRichard Henderson #define ARM64_REG_ESR_EL1 ARM64_SYS_REG(3, 0, 5, 2, 0)
116320c83dc9SRichard Henderson #define ARM64_REG_TCR_EL1 ARM64_SYS_REG(3, 0, 2, 0, 2)
116420c83dc9SRichard Henderson
116520c83dc9SRichard Henderson /*
116620c83dc9SRichard Henderson * ESR_EL1
116720c83dc9SRichard Henderson * ISS encoding
116820c83dc9SRichard Henderson * AARCH64: DFSC, bits [5:0]
116920c83dc9SRichard Henderson * AARCH32:
117020c83dc9SRichard Henderson * TTBCR.EAE == 0
117120c83dc9SRichard Henderson * FS[4] - DFSR[10]
117220c83dc9SRichard Henderson * FS[3:0] - DFSR[3:0]
117320c83dc9SRichard Henderson * TTBCR.EAE == 1
117420c83dc9SRichard Henderson * FS, bits [5:0]
117520c83dc9SRichard Henderson */
117620c83dc9SRichard Henderson #define ESR_DFSC(aarch64, lpae, v) \
117720c83dc9SRichard Henderson ((aarch64 || (lpae)) ? ((v) & 0x3F) \
117820c83dc9SRichard Henderson : (((v) >> 6) | ((v) & 0x1F)))
117920c83dc9SRichard Henderson
118020c83dc9SRichard Henderson #define ESR_DFSC_EXTABT(aarch64, lpae) \
118120c83dc9SRichard Henderson ((aarch64) ? 0x10 : (lpae) ? 0x10 : 0x8)
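/*
 * Worked example (illustrative only): for an AArch64 guest,
 * ESR_DFSC(1, 0, v) extracts DFSC bits [5:0] and ESR_DFSC_EXTABT(1, 0)
 * yields 0x10, the "synchronous external abort" fault status code.
 * For an AArch32 guest using the short-descriptor format (lpae == 0),
 * the FS field is reassembled from its two DFSR pieces and the
 * expected code is 0x8 instead.
 */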
118220c83dc9SRichard Henderson
118320c83dc9SRichard Henderson /**
118420c83dc9SRichard Henderson * kvm_arm_verify_ext_dabt_pending:
1185ca0d1b7cSPhilippe Mathieu-Daudé * @cpu: ARMCPU
118620c83dc9SRichard Henderson *
118720c83dc9SRichard Henderson * Verify the fault status code wrt the Ext DABT injection
118820c83dc9SRichard Henderson *
118920c83dc9SRichard Henderson * Returns: true if the fault status code is as expected, false otherwise
119020c83dc9SRichard Henderson */
1191ca0d1b7cSPhilippe Mathieu-Daudé static bool kvm_arm_verify_ext_dabt_pending(ARMCPU *cpu)
119220c83dc9SRichard Henderson {
1193ca0d1b7cSPhilippe Mathieu-Daudé CPUState *cs = CPU(cpu);
119420c83dc9SRichard Henderson uint64_t dfsr_val;
119520c83dc9SRichard Henderson
119620c83dc9SRichard Henderson if (!kvm_get_one_reg(cs, ARM64_REG_ESR_EL1, &dfsr_val)) {
119720c83dc9SRichard Henderson CPUARMState *env = &cpu->env;
119820c83dc9SRichard Henderson int aarch64_mode = arm_feature(env, ARM_FEATURE_AARCH64);
119920c83dc9SRichard Henderson int lpae = 0;
120020c83dc9SRichard Henderson
120120c83dc9SRichard Henderson if (!aarch64_mode) {
120220c83dc9SRichard Henderson uint64_t ttbcr;
120320c83dc9SRichard Henderson
120420c83dc9SRichard Henderson if (!kvm_get_one_reg(cs, ARM64_REG_TCR_EL1, &ttbcr)) {
120520c83dc9SRichard Henderson lpae = arm_feature(env, ARM_FEATURE_LPAE)
120620c83dc9SRichard Henderson && (ttbcr & TTBCR_EAE);
120720c83dc9SRichard Henderson }
120820c83dc9SRichard Henderson }
120920c83dc9SRichard Henderson /*
121020c83dc9SRichard Henderson * The verification here is based on the DFSC bits
121120c83dc9SRichard Henderson * of the ESR_EL1 reg only
121220c83dc9SRichard Henderson */
121320c83dc9SRichard Henderson return (ESR_DFSC(aarch64_mode, lpae, dfsr_val) ==
121420c83dc9SRichard Henderson ESR_DFSC_EXTABT(aarch64_mode, lpae));
121520c83dc9SRichard Henderson }
121620c83dc9SRichard Henderson return false;
121720c83dc9SRichard Henderson }
121820c83dc9SRichard Henderson
1219fcf5ef2aSThomas Huth void kvm_arch_pre_run(CPUState *cs, struct kvm_run *run)
1220fcf5ef2aSThomas Huth {
12211711bfa5SBeata Michalska ARMCPU *cpu = ARM_CPU(cs);
12221711bfa5SBeata Michalska CPUARMState *env = &cpu->env;
12231711bfa5SBeata Michalska
12241711bfa5SBeata Michalska if (unlikely(env->ext_dabt_raised)) {
12251711bfa5SBeata Michalska /*
12261711bfa5SBeata Michalska * Verify that the external DABT has been properly injected;
12271711bfa5SBeata Michalska * otherwise we risk indefinitely re-running the faulting instruction.
12281711bfa5SBeata Michalska * This covers a very narrow case for kernels 5.5..5.5.4, when the
12291711bfa5SBeata Michalska * injected abort was misconfigured to be an IMPLEMENTATION DEFINED
12301711bfa5SBeata Michalska * exception (for 32-bit EL1).
12311711bfa5SBeata Michalska */
12321711bfa5SBeata Michalska if (!arm_feature(env, ARM_FEATURE_AARCH64) &&
1233ca0d1b7cSPhilippe Mathieu-Daudé unlikely(!kvm_arm_verify_ext_dabt_pending(cpu))) {
12341711bfa5SBeata Michalska
12351711bfa5SBeata Michalska error_report("Data abort exception with no valid ISS generated by "
12361711bfa5SBeata Michalska "guest memory access. KVM unable to emulate faulting "
12371711bfa5SBeata Michalska "instruction. Failed to inject an external data abort "
12381711bfa5SBeata Michalska "into the guest.");
12391711bfa5SBeata Michalska abort();
12401711bfa5SBeata Michalska }
12411711bfa5SBeata Michalska /* Clear the status */
12421711bfa5SBeata Michalska env->ext_dabt_raised = 0;
12431711bfa5SBeata Michalska }
1244fcf5ef2aSThomas Huth }
1245fcf5ef2aSThomas Huth
1246fcf5ef2aSThomas Huth MemTxAttrs kvm_arch_post_run(CPUState *cs, struct kvm_run *run)
1247fcf5ef2aSThomas Huth {
12485d721b78SAlexander Graf ARMCPU *cpu;
12495d721b78SAlexander Graf uint32_t switched_level;
12505d721b78SAlexander Graf
12515d721b78SAlexander Graf if (kvm_irqchip_in_kernel()) {
12525d721b78SAlexander Graf /*
12535d721b78SAlexander Graf * We only need to sync timer states with user-space interrupt
12545d721b78SAlexander Graf * controllers, so return early and save cycles if we don't.
12555d721b78SAlexander Graf */
12565d721b78SAlexander Graf return MEMTXATTRS_UNSPECIFIED;
12575d721b78SAlexander Graf }
12585d721b78SAlexander Graf
12595d721b78SAlexander Graf cpu = ARM_CPU(cs);
12605d721b78SAlexander Graf
12615d721b78SAlexander Graf /* Synchronize our shadowed in-kernel device irq lines with the kvm ones */
12625d721b78SAlexander Graf if (run->s.regs.device_irq_level != cpu->device_irq_level) {
12635d721b78SAlexander Graf switched_level = cpu->device_irq_level ^ run->s.regs.device_irq_level;
12645d721b78SAlexander Graf
1265195801d7SStefan Hajnoczi bql_lock();
12665d721b78SAlexander Graf
12675d721b78SAlexander Graf if (switched_level & KVM_ARM_DEV_EL1_VTIMER) {
12685d721b78SAlexander Graf qemu_set_irq(cpu->gt_timer_outputs[GTIMER_VIRT],
12695d721b78SAlexander Graf !!(run->s.regs.device_irq_level &
12705d721b78SAlexander Graf KVM_ARM_DEV_EL1_VTIMER));
12715d721b78SAlexander Graf switched_level &= ~KVM_ARM_DEV_EL1_VTIMER;
12725d721b78SAlexander Graf }
12735d721b78SAlexander Graf
12745d721b78SAlexander Graf if (switched_level & KVM_ARM_DEV_EL1_PTIMER) {
12755d721b78SAlexander Graf qemu_set_irq(cpu->gt_timer_outputs[GTIMER_PHYS],
12765d721b78SAlexander Graf !!(run->s.regs.device_irq_level &
12775d721b78SAlexander Graf KVM_ARM_DEV_EL1_PTIMER));
12785d721b78SAlexander Graf switched_level &= ~KVM_ARM_DEV_EL1_PTIMER;
12795d721b78SAlexander Graf }
12805d721b78SAlexander Graf
1281b1659527SAndrew Jones if (switched_level & KVM_ARM_DEV_PMU) {
1282b1659527SAndrew Jones qemu_set_irq(cpu->pmu_interrupt,
1283b1659527SAndrew Jones !!(run->s.regs.device_irq_level & KVM_ARM_DEV_PMU));
1284b1659527SAndrew Jones switched_level &= ~KVM_ARM_DEV_PMU;
1285b1659527SAndrew Jones }
12865d721b78SAlexander Graf
12875d721b78SAlexander Graf if (switched_level) {
12885d721b78SAlexander Graf qemu_log_mask(LOG_UNIMP, "%s: unhandled in-kernel device IRQ %x\n",
12895d721b78SAlexander Graf __func__, switched_level);
12905d721b78SAlexander Graf }
12915d721b78SAlexander Graf
12925d721b78SAlexander Graf /* We also mark unknown levels as processed to not waste cycles */
12935d721b78SAlexander Graf cpu->device_irq_level = run->s.regs.device_irq_level;
1294195801d7SStefan Hajnoczi bql_unlock();
12955d721b78SAlexander Graf }
12965d721b78SAlexander Graf
1297fcf5ef2aSThomas Huth return MEMTXATTRS_UNSPECIFIED;
1298fcf5ef2aSThomas Huth }
1299fcf5ef2aSThomas Huth
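/*
 * VM state-change hook for kvm-adjvtime: capture KVM_REG_ARM_TIMER_CNT
 * when the VM stops and write it back when it resumes, so that the
 * guest's virtual counter does not advance while the VM is paused.
 */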
1300396b6c50SRichard Henderson static void kvm_arm_vm_state_change(void *opaque, bool running, RunState state)
1301e5ac4200SAndrew Jones {
130276acc987SPhilippe Mathieu-Daudé ARMCPU *cpu = opaque;
1303e5ac4200SAndrew Jones
1304e5ac4200SAndrew Jones if (running) {
1305e5ac4200SAndrew Jones if (cpu->kvm_adjvtime) {
130676acc987SPhilippe Mathieu-Daudé kvm_arm_put_virtual_time(cpu);
1307e5ac4200SAndrew Jones }
1308e5ac4200SAndrew Jones } else {
1309e5ac4200SAndrew Jones if (cpu->kvm_adjvtime) {
131076acc987SPhilippe Mathieu-Daudé kvm_arm_get_virtual_time(cpu);
1311e5ac4200SAndrew Jones }
1312e5ac4200SAndrew Jones }
1313e5ac4200SAndrew Jones }
1314fcf5ef2aSThomas Huth
1315694bcaa8SBeata Michalska /**
1316694bcaa8SBeata Michalska * kvm_arm_handle_dabt_nisv:
13173187e06aSPhilippe Mathieu-Daudé * @cpu: ARMCPU
1318694bcaa8SBeata Michalska * @esr_iss: ISS encoding (limited) for the exception from Data Abort
1319694bcaa8SBeata Michalska * ISV bit set to '0b0' -> no valid instruction syndrome
1320694bcaa8SBeata Michalska * @fault_ipa: faulting address for the synchronous data abort
1321694bcaa8SBeata Michalska *
1322694bcaa8SBeata Michalska * Returns: 0 if the exception has been handled, < 0 otherwise
1323694bcaa8SBeata Michalska */
13243187e06aSPhilippe Mathieu-Daudé static int kvm_arm_handle_dabt_nisv(ARMCPU *cpu, uint64_t esr_iss,
1325694bcaa8SBeata Michalska uint64_t fault_ipa)
1326694bcaa8SBeata Michalska {
13271711bfa5SBeata Michalska CPUARMState *env = &cpu->env;
1328694bcaa8SBeata Michalska /*
1329694bcaa8SBeata Michalska * Request KVM to inject the external data abort into the guest
1330694bcaa8SBeata Michalska */
1331694bcaa8SBeata Michalska if (cap_has_inject_ext_dabt) {
1332694bcaa8SBeata Michalska struct kvm_vcpu_events events = { };
1333694bcaa8SBeata Michalska /*
1334694bcaa8SBeata Michalska * The external data abort event will be handled immediately by KVM
1335694bcaa8SBeata Michalska * using the address fault that triggered the exit on given VCPU.
1336694bcaa8SBeata Michalska * Requesting injection of the external data abort does not rely
1337694bcaa8SBeata Michalska * on any other VCPU state. Therefore, in this particular case, the VCPU
1338694bcaa8SBeata Michalska * synchronization can be exceptionally skipped.
1339694bcaa8SBeata Michalska */
1340694bcaa8SBeata Michalska events.exception.ext_dabt_pending = 1;
1341694bcaa8SBeata Michalska /* KVM_CAP_ARM_INJECT_EXT_DABT implies KVM_CAP_VCPU_EVENTS */
13423187e06aSPhilippe Mathieu-Daudé if (!kvm_vcpu_ioctl(CPU(cpu), KVM_SET_VCPU_EVENTS, &events)) {
13431711bfa5SBeata Michalska env->ext_dabt_raised = 1;
13441711bfa5SBeata Michalska return 0;
13451711bfa5SBeata Michalska }
1346694bcaa8SBeata Michalska } else {
1347694bcaa8SBeata Michalska error_report("Data abort exception triggered by guest memory access "
1348694bcaa8SBeata Michalska "at physical address: 0x" TARGET_FMT_lx,
1349694bcaa8SBeata Michalska (target_ulong)fault_ipa);
1350694bcaa8SBeata Michalska error_printf("KVM unable to emulate faulting instruction.\n");
1351694bcaa8SBeata Michalska }
1352694bcaa8SBeata Michalska return -1;
1353694bcaa8SBeata Michalska }
1354694bcaa8SBeata Michalska
13555cba8f26SRichard Henderson /**
13565cba8f26SRichard Henderson * kvm_arm_handle_debug:
135739639275SPhilippe Mathieu-Daudé * @cpu: ARMCPU
13585cba8f26SRichard Henderson * @debug_exit: debug part of the KVM exit structure
13595cba8f26SRichard Henderson *
13605cba8f26SRichard Henderson * Returns: TRUE if the debug exception was handled.
13615cba8f26SRichard Henderson *
13625cba8f26SRichard Henderson * See v8 ARM ARM D7.2.27 ESR_ELx, Exception Syndrome Register
13635cba8f26SRichard Henderson *
13645cba8f26SRichard Henderson * To minimise translating between kernel and user-space the kernel
13655cba8f26SRichard Henderson * ABI just provides user-space with the full exception syndrome
13665cba8f26SRichard Henderson * register value to be decoded in QEMU.
13675cba8f26SRichard Henderson */
136839639275SPhilippe Mathieu-Daudé static bool kvm_arm_handle_debug(ARMCPU *cpu,
13695cba8f26SRichard Henderson struct kvm_debug_exit_arch *debug_exit)
13705cba8f26SRichard Henderson {
13715cba8f26SRichard Henderson int hsr_ec = syn_get_ec(debug_exit->hsr);
137239639275SPhilippe Mathieu-Daudé CPUState *cs = CPU(cpu);
13735cba8f26SRichard Henderson CPUARMState *env = &cpu->env;
13745cba8f26SRichard Henderson
13755cba8f26SRichard Henderson /* Ensure PC is synchronised */
13765cba8f26SRichard Henderson kvm_cpu_synchronize_state(cs);
13775cba8f26SRichard Henderson
13785cba8f26SRichard Henderson switch (hsr_ec) {
13795cba8f26SRichard Henderson case EC_SOFTWARESTEP:
13805cba8f26SRichard Henderson if (cs->singlestep_enabled) {
13815cba8f26SRichard Henderson return true;
13825cba8f26SRichard Henderson } else {
13835cba8f26SRichard Henderson /*
13845cba8f26SRichard Henderson * The kernel should have suppressed the guest's ability to
13855cba8f26SRichard Henderson * single step at this point so something has gone wrong.
13865cba8f26SRichard Henderson */
13875cba8f26SRichard Henderson error_report("%s: guest single-step while debugging unsupported"
13885cba8f26SRichard Henderson " (%"PRIx64", %"PRIx32")",
13895cba8f26SRichard Henderson __func__, env->pc, debug_exit->hsr);
13905cba8f26SRichard Henderson return false;
13915cba8f26SRichard Henderson }
13925cba8f26SRichard Henderson break;
13935cba8f26SRichard Henderson case EC_AA64_BKPT:
13945cba8f26SRichard Henderson if (kvm_find_sw_breakpoint(cs, env->pc)) {
13955cba8f26SRichard Henderson return true;
13965cba8f26SRichard Henderson }
13975cba8f26SRichard Henderson break;
13985cba8f26SRichard Henderson case EC_BREAKPOINT:
13995cba8f26SRichard Henderson if (find_hw_breakpoint(cs, env->pc)) {
14005cba8f26SRichard Henderson return true;
14015cba8f26SRichard Henderson }
14025cba8f26SRichard Henderson break;
14035cba8f26SRichard Henderson case EC_WATCHPOINT:
14045cba8f26SRichard Henderson {
14055cba8f26SRichard Henderson CPUWatchpoint *wp = find_hw_watchpoint(cs, debug_exit->far);
14065cba8f26SRichard Henderson if (wp) {
14075cba8f26SRichard Henderson cs->watchpoint_hit = wp;
14085cba8f26SRichard Henderson return true;
14095cba8f26SRichard Henderson }
14105cba8f26SRichard Henderson break;
14115cba8f26SRichard Henderson }
14125cba8f26SRichard Henderson default:
14135cba8f26SRichard Henderson error_report("%s: unhandled debug exit (%"PRIx32", %"PRIx64")",
14145cba8f26SRichard Henderson __func__, debug_exit->hsr, env->pc);
14155cba8f26SRichard Henderson }
14165cba8f26SRichard Henderson
14175cba8f26SRichard Henderson /* If we are not handling the debug exception it must belong to
14185cba8f26SRichard Henderson * the guest. Let's re-use the existing TCG interrupt code to set
14195cba8f26SRichard Henderson * everything up properly.
14205cba8f26SRichard Henderson */
14215cba8f26SRichard Henderson cs->exception_index = EXCP_BKPT;
14225cba8f26SRichard Henderson env->exception.syndrome = debug_exit->hsr;
14235cba8f26SRichard Henderson env->exception.vaddress = debug_exit->far;
14245cba8f26SRichard Henderson env->exception.target_el = 1;
1425195801d7SStefan Hajnoczi bql_lock();
14265cba8f26SRichard Henderson arm_cpu_do_interrupt(cs);
1427195801d7SStefan Hajnoczi bql_unlock();
14285cba8f26SRichard Henderson
14295cba8f26SRichard Henderson return false;
14305cba8f26SRichard Henderson }
14315cba8f26SRichard Henderson
1432fcf5ef2aSThomas Huth int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
1433fcf5ef2aSThomas Huth {
14343187e06aSPhilippe Mathieu-Daudé ARMCPU *cpu = ARM_CPU(cs);
1435fcf5ef2aSThomas Huth int ret = 0;
1436fcf5ef2aSThomas Huth
1437fcf5ef2aSThomas Huth switch (run->exit_reason) {
1438fcf5ef2aSThomas Huth case KVM_EXIT_DEBUG:
143939639275SPhilippe Mathieu-Daudé if (kvm_arm_handle_debug(cpu, &run->debug.arch)) {
1440fcf5ef2aSThomas Huth ret = EXCP_DEBUG;
1441fcf5ef2aSThomas Huth } /* otherwise return to guest */
1442fcf5ef2aSThomas Huth break;
1443694bcaa8SBeata Michalska case KVM_EXIT_ARM_NISV:
1444694bcaa8SBeata Michalska /* External DABT with no valid ISS to decode */
14453187e06aSPhilippe Mathieu-Daudé ret = kvm_arm_handle_dabt_nisv(cpu, run->arm_nisv.esr_iss,
1446694bcaa8SBeata Michalska run->arm_nisv.fault_ipa);
1447694bcaa8SBeata Michalska break;
1448fcf5ef2aSThomas Huth default:
1449fcf5ef2aSThomas Huth qemu_log_mask(LOG_UNIMP, "%s: un-handled exit reason %d\n",
1450fcf5ef2aSThomas Huth __func__, run->exit_reason);
1451fcf5ef2aSThomas Huth break;
1452fcf5ef2aSThomas Huth }
1453fcf5ef2aSThomas Huth return ret;
1454fcf5ef2aSThomas Huth }
1455fcf5ef2aSThomas Huth
1456fcf5ef2aSThomas Huth bool kvm_arch_stop_on_emulation_error(CPUState *cs)
1457fcf5ef2aSThomas Huth {
1458fcf5ef2aSThomas Huth return true;
1459fcf5ef2aSThomas Huth }
1460fcf5ef2aSThomas Huth
1461fcf5ef2aSThomas Huth int kvm_arch_process_async_events(CPUState *cs)
1462fcf5ef2aSThomas Huth {
1463fcf5ef2aSThomas Huth return 0;
1464fcf5ef2aSThomas Huth }
1465fcf5ef2aSThomas Huth
1466ec4145f7SRichard Henderson /**
1467ea79c599SRichard Henderson * kvm_arm_hw_debug_active:
1468366bf10eSPhilippe Mathieu-Daudé * @cpu: ARMCPU
1469ea79c599SRichard Henderson *
1470ea79c599SRichard Henderson * Return: TRUE if any hardware breakpoints in use.
1471ea79c599SRichard Henderson */
1472366bf10eSPhilippe Mathieu-Daudé static bool kvm_arm_hw_debug_active(ARMCPU *cpu)
1473ea79c599SRichard Henderson {
1474ea79c599SRichard Henderson return ((cur_hw_wps > 0) || (cur_hw_bps > 0));
1475ea79c599SRichard Henderson }
1476ea79c599SRichard Henderson
1477ea79c599SRichard Henderson /**
1478ec4145f7SRichard Henderson * kvm_arm_copy_hw_debug_data:
1479ec4145f7SRichard Henderson * @ptr: kvm_guest_debug_arch structure
1480ec4145f7SRichard Henderson *
1481ec4145f7SRichard Henderson * Copy the architecture specific debug registers into the
1482ec4145f7SRichard Henderson * kvm_guest_debug ioctl structure.
1483ec4145f7SRichard Henderson */
1484ec4145f7SRichard Henderson static void kvm_arm_copy_hw_debug_data(struct kvm_guest_debug_arch *ptr)
1485ec4145f7SRichard Henderson {
1486ec4145f7SRichard Henderson int i;
1487ec4145f7SRichard Henderson memset(ptr, 0, sizeof(struct kvm_guest_debug_arch));
1488ec4145f7SRichard Henderson
1489ec4145f7SRichard Henderson for (i = 0; i < max_hw_wps; i++) {
1490ec4145f7SRichard Henderson HWWatchpoint *wp = get_hw_wp(i);
1491ec4145f7SRichard Henderson ptr->dbg_wcr[i] = wp->wcr;
1492ec4145f7SRichard Henderson ptr->dbg_wvr[i] = wp->wvr;
1493ec4145f7SRichard Henderson }
1494ec4145f7SRichard Henderson for (i = 0; i < max_hw_bps; i++) {
1495ec4145f7SRichard Henderson HWBreakpoint *bp = get_hw_bp(i);
1496ec4145f7SRichard Henderson ptr->dbg_bcr[i] = bp->bcr;
1497ec4145f7SRichard Henderson ptr->dbg_bvr[i] = bp->bvr;
1498ec4145f7SRichard Henderson }
1499ec4145f7SRichard Henderson }
1500ec4145f7SRichard Henderson
1501fcf5ef2aSThomas Huth void kvm_arch_update_guest_debug(CPUState *cs, struct kvm_guest_debug *dbg)
1502fcf5ef2aSThomas Huth {
1503fcf5ef2aSThomas Huth if (kvm_sw_breakpoints_active(cs)) {
1504fcf5ef2aSThomas Huth dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP;
1505fcf5ef2aSThomas Huth }
1506366bf10eSPhilippe Mathieu-Daudé if (kvm_arm_hw_debug_active(ARM_CPU(cs))) {
1507fcf5ef2aSThomas Huth dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW;
1508fcf5ef2aSThomas Huth kvm_arm_copy_hw_debug_data(&dbg->arch);
1509fcf5ef2aSThomas Huth }
1510fcf5ef2aSThomas Huth }
1511fcf5ef2aSThomas Huth
1512fcf5ef2aSThomas Huth void kvm_arch_init_irq_routing(KVMState *s)
1513fcf5ef2aSThomas Huth {
1514fcf5ef2aSThomas Huth }
1515fcf5ef2aSThomas Huth
15164376c40dSPaolo Bonzini int kvm_arch_irqchip_create(KVMState *s)
1517fcf5ef2aSThomas Huth {
15184376c40dSPaolo Bonzini if (kvm_kernel_irqchip_split()) {
151947c182feSCornelia Huck error_report("-machine kernel_irqchip=split is not supported on ARM.");
1520fcf5ef2aSThomas Huth exit(1);
1521fcf5ef2aSThomas Huth }
1522fcf5ef2aSThomas Huth
1523fcf5ef2aSThomas Huth /* If we can create the VGIC using the newer device control API, we
1524fcf5ef2aSThomas Huth * let the device do this when it initializes itself; otherwise we
1525fcf5ef2aSThomas Huth * fall back to the old API. */
1526fcf5ef2aSThomas Huth return kvm_check_extension(s, KVM_CAP_DEVICE_CTRL);
1527fcf5ef2aSThomas Huth }
1528fcf5ef2aSThomas Huth
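/*
 * Probe for in-kernel VGIC support by test-creating the devices
 * (the 'true' flag asks KVM only to check, not to instantiate);
 * returns a mask of KVM_ARM_VGIC_V2/KVM_ARM_VGIC_V3 bits.
 */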
1529fcf5ef2aSThomas Huth int kvm_arm_vgic_probe(void)
1530fcf5ef2aSThomas Huth {
1531d45efe47SEric Auger int val = 0;
1532d45efe47SEric Auger
1533fcf5ef2aSThomas Huth if (kvm_create_device(kvm_state,
1534fcf5ef2aSThomas Huth KVM_DEV_TYPE_ARM_VGIC_V3, true) == 0) {
1535d45efe47SEric Auger val |= KVM_ARM_VGIC_V3;
1536fcf5ef2aSThomas Huth }
1537d45efe47SEric Auger if (kvm_create_device(kvm_state,
1538d45efe47SEric Auger KVM_DEV_TYPE_ARM_VGIC_V2, true) == 0) {
1539d45efe47SEric Auger val |= KVM_ARM_VGIC_V2;
1540d45efe47SEric Auger }
1541d45efe47SEric Auger return val;
1542fcf5ef2aSThomas Huth }
1543fcf5ef2aSThomas Huth
1544f6530926SEric Auger int kvm_arm_set_irq(int cpu, int irqtype, int irq, int level)
1545f6530926SEric Auger {
1546f6530926SEric Auger int kvm_irq = (irqtype << KVM_ARM_IRQ_TYPE_SHIFT) | irq;
1547f6530926SEric Auger int cpu_idx1 = cpu % 256;
1548f6530926SEric Auger int cpu_idx2 = cpu / 256;
1549f6530926SEric Auger
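/*
 * The KVM irq line encoding carries the target VCPU index in two
 * fields (a low byte in VCPU plus a high extension in VCPU2), which
 * is why the index is split here; this allows addressing more than
 * 256 VCPUs.
 */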
1550f6530926SEric Auger kvm_irq |= (cpu_idx1 << KVM_ARM_IRQ_VCPU_SHIFT) |
1551f6530926SEric Auger (cpu_idx2 << KVM_ARM_IRQ_VCPU2_SHIFT);
1552f6530926SEric Auger
1553f6530926SEric Auger return kvm_set_irq(kvm_state, kvm_irq, !!level);
1554f6530926SEric Auger }
1555f6530926SEric Auger
1556fcf5ef2aSThomas Huth int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route,
1557fcf5ef2aSThomas Huth uint64_t address, uint32_t data, PCIDevice *dev)
1558fcf5ef2aSThomas Huth {
1559b05c81d2SEric Auger AddressSpace *as = pci_device_iommu_address_space(dev);
1560b05c81d2SEric Auger hwaddr xlat, len, doorbell_gpa;
1561b05c81d2SEric Auger MemoryRegionSection mrs;
1562b05c81d2SEric Auger MemoryRegion *mr;
1563b05c81d2SEric Auger
1564b05c81d2SEric Auger if (as == &address_space_memory) {
1565fcf5ef2aSThomas Huth return 0;
1566fcf5ef2aSThomas Huth }
1567fcf5ef2aSThomas Huth
1568b05c81d2SEric Auger /* MSI doorbell address is translated by an IOMMU */
1569b05c81d2SEric Auger
1570dfa0d9b8SHamza Mahfooz RCU_READ_LOCK_GUARD();
1571dfa0d9b8SHamza Mahfooz
1572bc6b1cecSPeter Maydell mr = address_space_translate(as, address, &xlat, &len, true,
1573bc6b1cecSPeter Maydell MEMTXATTRS_UNSPECIFIED);
1574dfa0d9b8SHamza Mahfooz
1575b05c81d2SEric Auger if (!mr) {
1576dfa0d9b8SHamza Mahfooz return 1;
1577b05c81d2SEric Auger }
1578dfa0d9b8SHamza Mahfooz
1579b05c81d2SEric Auger mrs = memory_region_find(mr, xlat, 1);
1580dfa0d9b8SHamza Mahfooz
1581b05c81d2SEric Auger if (!mrs.mr) {
1582dfa0d9b8SHamza Mahfooz return 1;
1583b05c81d2SEric Auger }
1584b05c81d2SEric Auger
1585b05c81d2SEric Auger doorbell_gpa = mrs.offset_within_address_space;
1586b05c81d2SEric Auger memory_region_unref(mrs.mr);
1587b05c81d2SEric Auger
1588b05c81d2SEric Auger route->u.msi.address_lo = doorbell_gpa;
1589b05c81d2SEric Auger route->u.msi.address_hi = doorbell_gpa >> 32;
1590b05c81d2SEric Auger
1591b05c81d2SEric Auger trace_kvm_arm_fixup_msi_route(address, doorbell_gpa);
1592b05c81d2SEric Auger
1593dfa0d9b8SHamza Mahfooz return 0;
1594b05c81d2SEric Auger }
1595b05c81d2SEric Auger
1596fcf5ef2aSThomas Huth int kvm_arch_add_msi_route_post(struct kvm_irq_routing_entry *route,
1597fcf5ef2aSThomas Huth int vector, PCIDevice *dev)
1598fcf5ef2aSThomas Huth {
1599fcf5ef2aSThomas Huth return 0;
1600fcf5ef2aSThomas Huth }
1601fcf5ef2aSThomas Huth
1602fcf5ef2aSThomas Huth int kvm_arch_release_virq_post(int virq)
1603fcf5ef2aSThomas Huth {
1604fcf5ef2aSThomas Huth return 0;
1605fcf5ef2aSThomas Huth }
1606fcf5ef2aSThomas Huth
1607fcf5ef2aSThomas Huth int kvm_arch_msi_data_to_gsi(uint32_t data)
1608fcf5ef2aSThomas Huth {
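/*
 * The MSI data carries the SPI number; GIC SPI INTIDs start at 32,
 * hence the subtraction to get a zero-based GSI.
 */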
1609fcf5ef2aSThomas Huth return (data - 32) & 0xffff;
1610fcf5ef2aSThomas Huth }
161192a5199bSTom Lendacky
1612c8f2eb5dSShameer Kolothum static void kvm_arch_get_eager_split_size(Object *obj, Visitor *v,
1613c8f2eb5dSShameer Kolothum const char *name, void *opaque,
1614c8f2eb5dSShameer Kolothum Error **errp)
1615c8f2eb5dSShameer Kolothum {
1616c8f2eb5dSShameer Kolothum KVMState *s = KVM_STATE(obj);
1617c8f2eb5dSShameer Kolothum uint64_t value = s->kvm_eager_split_size;
1618c8f2eb5dSShameer Kolothum
1619c8f2eb5dSShameer Kolothum visit_type_size(v, name, &value, errp);
1620c8f2eb5dSShameer Kolothum }
1621c8f2eb5dSShameer Kolothum
1622c8f2eb5dSShameer Kolothum static void kvm_arch_set_eager_split_size(Object *obj, Visitor *v,
1623c8f2eb5dSShameer Kolothum const char *name, void *opaque,
1624c8f2eb5dSShameer Kolothum Error **errp)
1625c8f2eb5dSShameer Kolothum {
1626c8f2eb5dSShameer Kolothum KVMState *s = KVM_STATE(obj);
1627c8f2eb5dSShameer Kolothum uint64_t value;
1628c8f2eb5dSShameer Kolothum
1629c8f2eb5dSShameer Kolothum if (s->fd != -1) {
1630c8f2eb5dSShameer Kolothum error_setg(errp, "Unable to set eager-split-size after KVM has been initialized");
1631c8f2eb5dSShameer Kolothum return;
1632c8f2eb5dSShameer Kolothum }
1633c8f2eb5dSShameer Kolothum
1634c8f2eb5dSShameer Kolothum if (!visit_type_size(v, name, &value, errp)) {
1635c8f2eb5dSShameer Kolothum return;
1636c8f2eb5dSShameer Kolothum }
1637c8f2eb5dSShameer Kolothum
1638c8f2eb5dSShameer Kolothum if (value && !is_power_of_2(value)) {
1639c8f2eb5dSShameer Kolothum error_setg(errp, "eager-split-size must be a power of two");
1640c8f2eb5dSShameer Kolothum return;
1641c8f2eb5dSShameer Kolothum }
1642c8f2eb5dSShameer Kolothum
1643c8f2eb5dSShameer Kolothum s->kvm_eager_split_size = value;
1644c8f2eb5dSShameer Kolothum }
1645c8f2eb5dSShameer Kolothum
16463dba0a33SPaolo Bonzini void kvm_arch_accel_class_init(ObjectClass *oc)
16473dba0a33SPaolo Bonzini {
1648c8f2eb5dSShameer Kolothum object_class_property_add(oc, "eager-split-size", "size",
1649c8f2eb5dSShameer Kolothum kvm_arch_get_eager_split_size,
1650c8f2eb5dSShameer Kolothum kvm_arch_set_eager_split_size, NULL, NULL);
1651c8f2eb5dSShameer Kolothum
1652c8f2eb5dSShameer Kolothum object_class_property_set_description(oc, "eager-split-size",
1653c8f2eb5dSShameer Kolothum "Eager Page Split chunk size for hugepages. (default: 0, disabled)");
16543dba0a33SPaolo Bonzini }
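/*
 * A hypothetical invocation using the property registered above
 * (assuming a host kernel with eager page split support):
 *     qemu-system-aarch64 -accel kvm,eager-split-size=64K ...
 */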
1655de3c9601SRichard Henderson
1656de3c9601SRichard Henderson int kvm_arch_insert_hw_breakpoint(vaddr addr, vaddr len, int type)
1657de3c9601SRichard Henderson {
1658de3c9601SRichard Henderson switch (type) {
1659de3c9601SRichard Henderson case GDB_BREAKPOINT_HW:
1660de3c9601SRichard Henderson return insert_hw_breakpoint(addr);
1662de3c9601SRichard Henderson case GDB_WATCHPOINT_READ:
1663de3c9601SRichard Henderson case GDB_WATCHPOINT_WRITE:
1664de3c9601SRichard Henderson case GDB_WATCHPOINT_ACCESS:
1665de3c9601SRichard Henderson return insert_hw_watchpoint(addr, len, type);
1666de3c9601SRichard Henderson default:
1667de3c9601SRichard Henderson return -ENOSYS;
1668de3c9601SRichard Henderson }
1669de3c9601SRichard Henderson }
1670de3c9601SRichard Henderson
1671de3c9601SRichard Henderson int kvm_arch_remove_hw_breakpoint(vaddr addr, vaddr len, int type)
1672de3c9601SRichard Henderson {
1673de3c9601SRichard Henderson switch (type) {
1674de3c9601SRichard Henderson case GDB_BREAKPOINT_HW:
1675de3c9601SRichard Henderson return delete_hw_breakpoint(addr);
1676de3c9601SRichard Henderson case GDB_WATCHPOINT_READ:
1677de3c9601SRichard Henderson case GDB_WATCHPOINT_WRITE:
1678de3c9601SRichard Henderson case GDB_WATCHPOINT_ACCESS:
1679de3c9601SRichard Henderson return delete_hw_watchpoint(addr, len, type);
1680de3c9601SRichard Henderson default:
1681de3c9601SRichard Henderson return -ENOSYS;
1682de3c9601SRichard Henderson }
1683de3c9601SRichard Henderson }
1684de3c9601SRichard Henderson
1685de3c9601SRichard Henderson void kvm_arch_remove_all_hw_breakpoints(void)
1686de3c9601SRichard Henderson {
1687de3c9601SRichard Henderson if (cur_hw_wps > 0) {
1688de3c9601SRichard Henderson g_array_remove_range(hw_watchpoints, 0, cur_hw_wps);
1689de3c9601SRichard Henderson }
1690de3c9601SRichard Henderson if (cur_hw_bps > 0) {
1691de3c9601SRichard Henderson g_array_remove_range(hw_breakpoints, 0, cur_hw_bps);
1692de3c9601SRichard Henderson }
1693de3c9601SRichard Henderson }
1694de3c9601SRichard Henderson
1695e77034f7SPhilippe Mathieu-Daudé static bool kvm_arm_set_device_attr(ARMCPU *cpu, struct kvm_device_attr *attr,
1696de3c9601SRichard Henderson const char *name)
1697de3c9601SRichard Henderson {
1698de3c9601SRichard Henderson int err;
1699de3c9601SRichard Henderson
1700e77034f7SPhilippe Mathieu-Daudé err = kvm_vcpu_ioctl(CPU(cpu), KVM_HAS_DEVICE_ATTR, attr);
1701de3c9601SRichard Henderson if (err != 0) {
1702de3c9601SRichard Henderson error_report("%s: KVM_HAS_DEVICE_ATTR: %s", name, strerror(-err));
1703de3c9601SRichard Henderson return false;
1704de3c9601SRichard Henderson }
1705de3c9601SRichard Henderson
1706e77034f7SPhilippe Mathieu-Daudé err = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_DEVICE_ATTR, attr);
1707de3c9601SRichard Henderson if (err != 0) {
1708de3c9601SRichard Henderson error_report("%s: KVM_SET_DEVICE_ATTR: %s", name, strerror(-err));
1709de3c9601SRichard Henderson return false;
1710de3c9601SRichard Henderson }
1711de3c9601SRichard Henderson
1712de3c9601SRichard Henderson return true;
1713de3c9601SRichard Henderson }
1714de3c9601SRichard Henderson
1715d344f5baSPhilippe Mathieu-Daudé void kvm_arm_pmu_init(ARMCPU *cpu)
1716de3c9601SRichard Henderson {
1717de3c9601SRichard Henderson struct kvm_device_attr attr = {
1718de3c9601SRichard Henderson .group = KVM_ARM_VCPU_PMU_V3_CTRL,
1719de3c9601SRichard Henderson .attr = KVM_ARM_VCPU_PMU_V3_INIT,
1720de3c9601SRichard Henderson };
1721de3c9601SRichard Henderson
1722d344f5baSPhilippe Mathieu-Daudé if (!cpu->has_pmu) {
1723de3c9601SRichard Henderson return;
1724de3c9601SRichard Henderson }
1725d344f5baSPhilippe Mathieu-Daudé if (!kvm_arm_set_device_attr(cpu, &attr, "PMU")) {
1726de3c9601SRichard Henderson error_report("failed to init PMU");
1727de3c9601SRichard Henderson abort();
1728de3c9601SRichard Henderson }
1729de3c9601SRichard Henderson }
1730de3c9601SRichard Henderson
17315ed84f3bSPhilippe Mathieu-Daudé void kvm_arm_pmu_set_irq(ARMCPU *cpu, int irq)
1732de3c9601SRichard Henderson {
1733de3c9601SRichard Henderson struct kvm_device_attr attr = {
1734de3c9601SRichard Henderson .group = KVM_ARM_VCPU_PMU_V3_CTRL,
1735de3c9601SRichard Henderson .addr = (intptr_t)&irq,
1736de3c9601SRichard Henderson .attr = KVM_ARM_VCPU_PMU_V3_IRQ,
1737de3c9601SRichard Henderson };
1738de3c9601SRichard Henderson
17395ed84f3bSPhilippe Mathieu-Daudé if (!cpu->has_pmu) {
1740de3c9601SRichard Henderson return;
1741de3c9601SRichard Henderson }
17425ed84f3bSPhilippe Mathieu-Daudé if (!kvm_arm_set_device_attr(cpu, &attr, "PMU")) {
1743de3c9601SRichard Henderson error_report("failed to set irq for PMU");
1744de3c9601SRichard Henderson abort();
1745de3c9601SRichard Henderson }
1746de3c9601SRichard Henderson }
1747de3c9601SRichard Henderson
174855503372SPhilippe Mathieu-Daudé void kvm_arm_pvtime_init(ARMCPU *cpu, uint64_t ipa)
1749de3c9601SRichard Henderson {
1750de3c9601SRichard Henderson struct kvm_device_attr attr = {
1751de3c9601SRichard Henderson .group = KVM_ARM_VCPU_PVTIME_CTRL,
1752de3c9601SRichard Henderson .attr = KVM_ARM_VCPU_PVTIME_IPA,
1753de3c9601SRichard Henderson .addr = (uint64_t)&ipa,
1754de3c9601SRichard Henderson };
1755de3c9601SRichard Henderson
175655503372SPhilippe Mathieu-Daudé if (cpu->kvm_steal_time == ON_OFF_AUTO_OFF) {
1757de3c9601SRichard Henderson return;
1758de3c9601SRichard Henderson }
175955503372SPhilippe Mathieu-Daudé if (!kvm_arm_set_device_attr(cpu, &attr, "PVTIME IPA")) {
1760de3c9601SRichard Henderson error_report("failed to init PVTIME IPA");
1761de3c9601SRichard Henderson abort();
1762de3c9601SRichard Henderson }
1763de3c9601SRichard Henderson }
1764de3c9601SRichard Henderson
1765de3c9601SRichard Henderson void kvm_arm_steal_time_finalize(ARMCPU *cpu, Error **errp)
1766de3c9601SRichard Henderson {
1767de3c9601SRichard Henderson bool has_steal_time = kvm_check_extension(kvm_state, KVM_CAP_STEAL_TIME);
1768de3c9601SRichard Henderson
1769de3c9601SRichard Henderson if (cpu->kvm_steal_time == ON_OFF_AUTO_AUTO) {
1770de3c9601SRichard Henderson if (!has_steal_time || !arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
1771de3c9601SRichard Henderson cpu->kvm_steal_time = ON_OFF_AUTO_OFF;
1772de3c9601SRichard Henderson } else {
1773de3c9601SRichard Henderson cpu->kvm_steal_time = ON_OFF_AUTO_ON;
1774de3c9601SRichard Henderson }
1775de3c9601SRichard Henderson } else if (cpu->kvm_steal_time == ON_OFF_AUTO_ON) {
1776de3c9601SRichard Henderson if (!has_steal_time) {
1777de3c9601SRichard Henderson error_setg(errp, "'kvm-steal-time' cannot be enabled "
1778de3c9601SRichard Henderson "on this host");
1779de3c9601SRichard Henderson return;
1780de3c9601SRichard Henderson } else if (!arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
1781de3c9601SRichard Henderson /*
1782de3c9601SRichard Henderson * DEN0057A chapter 2 says "This specification only covers
1783de3c9601SRichard Henderson * systems in which the Execution state of the hypervisor
1784de3c9601SRichard Henderson * as well as EL1 of virtual machines is AArch64.". And,
1785de3c9601SRichard Henderson * to ensure that, the smc/hvc calls are only specified as
1786de3c9601SRichard Henderson * smc64/hvc64.
1787de3c9601SRichard Henderson */
1788de3c9601SRichard Henderson error_setg(errp, "'kvm-steal-time' cannot be enabled "
1789de3c9601SRichard Henderson "for AArch32 guests");
1790de3c9601SRichard Henderson return;
1791de3c9601SRichard Henderson }
1792de3c9601SRichard Henderson }
1793de3c9601SRichard Henderson }
1794de3c9601SRichard Henderson
1795de3c9601SRichard Henderson bool kvm_arm_aarch32_supported(void)
1796de3c9601SRichard Henderson {
1797de3c9601SRichard Henderson return kvm_check_extension(kvm_state, KVM_CAP_ARM_EL1_32BIT);
1798de3c9601SRichard Henderson }
1799de3c9601SRichard Henderson
1800de3c9601SRichard Henderson bool kvm_arm_sve_supported(void)
1801de3c9601SRichard Henderson {
1802de3c9601SRichard Henderson return kvm_check_extension(kvm_state, KVM_CAP_ARM_SVE);
1803de3c9601SRichard Henderson }
1804de3c9601SRichard Henderson
1805918d0de0SCornelia Huck bool kvm_arm_mte_supported(void)
1806918d0de0SCornelia Huck {
1807918d0de0SCornelia Huck return kvm_check_extension(kvm_state, KVM_CAP_ARM_MTE);
1808918d0de0SCornelia Huck }
1809918d0de0SCornelia Huck
1810de3c9601SRichard Henderson QEMU_BUILD_BUG_ON(KVM_ARM64_SVE_VQ_MIN != 1);
1811de3c9601SRichard Henderson
1812d6339282SPhilippe Mathieu-Daudé uint32_t kvm_arm_sve_get_vls(ARMCPU *cpu)
1813de3c9601SRichard Henderson {
1814de3c9601SRichard Henderson /* Only call this function if kvm_arm_sve_supported() returns true. */
1815de3c9601SRichard Henderson static uint64_t vls[KVM_ARM64_SVE_VLS_WORDS];
1816de3c9601SRichard Henderson static bool probed;
1817de3c9601SRichard Henderson uint32_t vq = 0;
1818de3c9601SRichard Henderson int i;
1819de3c9601SRichard Henderson
1820de3c9601SRichard Henderson /*
1821de3c9601SRichard Henderson * KVM ensures all host CPUs support the same set of vector lengths.
1822de3c9601SRichard Henderson * So we only need to create the scratch VCPUs once and then cache
1823de3c9601SRichard Henderson * the results.
1824de3c9601SRichard Henderson */
1825de3c9601SRichard Henderson if (!probed) {
1826de3c9601SRichard Henderson struct kvm_vcpu_init init = {
1827de3c9601SRichard Henderson .target = -1,
1828de3c9601SRichard Henderson .features[0] = (1 << KVM_ARM_VCPU_SVE),
1829de3c9601SRichard Henderson };
1830de3c9601SRichard Henderson struct kvm_one_reg reg = {
1831de3c9601SRichard Henderson .id = KVM_REG_ARM64_SVE_VLS,
1832de3c9601SRichard Henderson .addr = (uint64_t)&vls[0],
1833de3c9601SRichard Henderson };
1834de3c9601SRichard Henderson int fdarray[3], ret;
1835de3c9601SRichard Henderson
1836de3c9601SRichard Henderson probed = true;
1837de3c9601SRichard Henderson
1838de3c9601SRichard Henderson if (!kvm_arm_create_scratch_host_vcpu(NULL, fdarray, &init)) {
1839de3c9601SRichard Henderson error_report("failed to create scratch VCPU with SVE enabled");
1840de3c9601SRichard Henderson abort();
1841de3c9601SRichard Henderson }
1842de3c9601SRichard Henderson ret = ioctl(fdarray[2], KVM_GET_ONE_REG, &reg);
1843de3c9601SRichard Henderson kvm_arm_destroy_scratch_host_vcpu(fdarray);
1844de3c9601SRichard Henderson if (ret) {
1845de3c9601SRichard Henderson error_report("failed to get KVM_REG_ARM64_SVE_VLS: %s",
1846de3c9601SRichard Henderson strerror(errno));
1847de3c9601SRichard Henderson abort();
1848de3c9601SRichard Henderson }
1849de3c9601SRichard Henderson
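/*
 * Bit n of the VLS bitmap being set means vector length
 * (n + 1) * 128 bits is supported. Illustrative example: with
 * vls[0] == 0b0101, VQ 1 and VQ 3 are available and the loop
 * below computes vq = 64 - clz64(0b0101) + 0 * 64 = 3.
 */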
1850de3c9601SRichard Henderson for (i = KVM_ARM64_SVE_VLS_WORDS - 1; i >= 0; --i) {
1851de3c9601SRichard Henderson if (vls[i]) {
1852de3c9601SRichard Henderson vq = 64 - clz64(vls[i]) + i * 64;
1853de3c9601SRichard Henderson break;
1854de3c9601SRichard Henderson }
1855de3c9601SRichard Henderson }
1856de3c9601SRichard Henderson if (vq > ARM_MAX_VQ) {
1857de3c9601SRichard Henderson warn_report("KVM supports vector lengths larger than "
1858de3c9601SRichard Henderson "QEMU can enable");
1859de3c9601SRichard Henderson vls[0] &= MAKE_64BIT_MASK(0, ARM_MAX_VQ);
1860de3c9601SRichard Henderson }
1861de3c9601SRichard Henderson }
1862de3c9601SRichard Henderson
1863de3c9601SRichard Henderson return vls[0];
1864de3c9601SRichard Henderson }
1865de3c9601SRichard Henderson
1866bc1b09b3SPhilippe Mathieu-Daudé static int kvm_arm_sve_set_vls(ARMCPU *cpu)
1867de3c9601SRichard Henderson {
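/*
 * Only the first word needs initializing: ARM_MAX_VQ (16 at the time
 * of writing) keeps sve_vq.map within one 64-bit word, so the
 * remaining words stay zero.
 */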
1868de3c9601SRichard Henderson uint64_t vls[KVM_ARM64_SVE_VLS_WORDS] = { cpu->sve_vq.map };
1869de3c9601SRichard Henderson
1870de3c9601SRichard Henderson assert(cpu->sve_max_vq <= KVM_ARM64_SVE_VQ_MAX);
1871de3c9601SRichard Henderson
1872bc1b09b3SPhilippe Mathieu-Daudé return kvm_set_one_reg(CPU(cpu), KVM_REG_ARM64_SVE_VLS, &vls[0]);
1873de3c9601SRichard Henderson }
1874de3c9601SRichard Henderson
1875de3c9601SRichard Henderson #define ARM_CPU_ID_MPIDR 3, 0, 0, 0, 5
1876de3c9601SRichard Henderson
1877de3c9601SRichard Henderson int kvm_arch_init_vcpu(CPUState *cs)
1878de3c9601SRichard Henderson {
1879de3c9601SRichard Henderson int ret;
1880de3c9601SRichard Henderson uint64_t mpidr;
1881de3c9601SRichard Henderson ARMCPU *cpu = ARM_CPU(cs);
1882de3c9601SRichard Henderson CPUARMState *env = &cpu->env;
1883de3c9601SRichard Henderson uint64_t psciver;
1884de3c9601SRichard Henderson
1885de3c9601SRichard Henderson if (cpu->kvm_target == QEMU_KVM_ARM_TARGET_NONE ||
1886de3c9601SRichard Henderson !object_dynamic_cast(OBJECT(cpu), TYPE_AARCH64_CPU)) {
1887de3c9601SRichard Henderson error_report("KVM is not supported for this guest CPU type");
1888de3c9601SRichard Henderson return -EINVAL;
1889de3c9601SRichard Henderson }
1890de3c9601SRichard Henderson
189176acc987SPhilippe Mathieu-Daudé qemu_add_vm_change_state_handler(kvm_arm_vm_state_change, cpu);
1892de3c9601SRichard Henderson
1893de3c9601SRichard Henderson /* Determine init features for this CPU */
1894de3c9601SRichard Henderson memset(cpu->kvm_init_features, 0, sizeof(cpu->kvm_init_features));
1895de3c9601SRichard Henderson if (cs->start_powered_off) {
1896de3c9601SRichard Henderson cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_POWER_OFF;
1897de3c9601SRichard Henderson }
1898de3c9601SRichard Henderson if (kvm_check_extension(cs->kvm_state, KVM_CAP_ARM_PSCI_0_2)) {
1899de3c9601SRichard Henderson cpu->psci_version = QEMU_PSCI_VERSION_0_2;
1900de3c9601SRichard Henderson cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_PSCI_0_2;
1901de3c9601SRichard Henderson }
1902ee1004bbSPhilippe Mathieu-Daudé if (!arm_feature(env, ARM_FEATURE_AARCH64)) {
1903de3c9601SRichard Henderson cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_EL1_32BIT;
1904de3c9601SRichard Henderson }
1905de3c9601SRichard Henderson if (cpu->has_pmu) {
1906de3c9601SRichard Henderson cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_PMU_V3;
1907de3c9601SRichard Henderson }
1908de3c9601SRichard Henderson if (cpu_isar_feature(aa64_sve, cpu)) {
1909de3c9601SRichard Henderson assert(kvm_arm_sve_supported());
1910de3c9601SRichard Henderson cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_SVE;
1911de3c9601SRichard Henderson }
1912de3c9601SRichard Henderson if (cpu_isar_feature(aa64_pauth, cpu)) {
1913de3c9601SRichard Henderson cpu->kvm_init_features[0] |= (1 << KVM_ARM_VCPU_PTRAUTH_ADDRESS |
1914de3c9601SRichard Henderson 1 << KVM_ARM_VCPU_PTRAUTH_GENERIC);
1915de3c9601SRichard Henderson }
1916de3c9601SRichard Henderson
1917de3c9601SRichard Henderson /* Do KVM_ARM_VCPU_INIT ioctl */
1918bbb22d58SPhilippe Mathieu-Daudé ret = kvm_arm_vcpu_init(cpu);
1919de3c9601SRichard Henderson if (ret) {
1920de3c9601SRichard Henderson return ret;
1921de3c9601SRichard Henderson }
1922de3c9601SRichard Henderson
1923de3c9601SRichard Henderson if (cpu_isar_feature(aa64_sve, cpu)) {
1924bc1b09b3SPhilippe Mathieu-Daudé ret = kvm_arm_sve_set_vls(cpu);
1925de3c9601SRichard Henderson if (ret) {
1926de3c9601SRichard Henderson return ret;
1927de3c9601SRichard Henderson }
19280d31a631SPhilippe Mathieu-Daudé ret = kvm_arm_vcpu_finalize(cpu, KVM_ARM_VCPU_SVE);
1929de3c9601SRichard Henderson if (ret) {
1930de3c9601SRichard Henderson return ret;
1931de3c9601SRichard Henderson }
1932de3c9601SRichard Henderson }
1933de3c9601SRichard Henderson
1934de3c9601SRichard Henderson /*
1935de3c9601SRichard Henderson * KVM reports the exact PSCI version it is implementing via a
1936de3c9601SRichard Henderson * special sysreg. If it is present, use its contents to determine
1937de3c9601SRichard Henderson * what to report to the guest in the dtb (it is the PSCI version,
1938de3c9601SRichard Henderson * in the same 16-bit-major, 16-bit-minor format that PSCI_VERSION
1939de3c9601SRichard Henderson * returns).
1940de3c9601SRichard Henderson */
1941de3c9601SRichard Henderson if (!kvm_get_one_reg(cs, KVM_REG_ARM_PSCI_VERSION, &psciver)) {
1942de3c9601SRichard Henderson cpu->psci_version = psciver;
1943de3c9601SRichard Henderson }
1944de3c9601SRichard Henderson
1945de3c9601SRichard Henderson /*
1946de3c9601SRichard Henderson * When KVM is in use, PSCI is emulated in-kernel and not by qemu.
1947de3c9601SRichard Henderson * Currently KVM has its own idea about MPIDR assignment, so we
1948de3c9601SRichard Henderson * override our defaults with what we get from KVM.
1949de3c9601SRichard Henderson */
1950de3c9601SRichard Henderson ret = kvm_get_one_reg(cs, ARM64_SYS_REG(ARM_CPU_ID_MPIDR), &mpidr);
1951de3c9601SRichard Henderson if (ret) {
1952de3c9601SRichard Henderson return ret;
1953de3c9601SRichard Henderson }
1954de3c9601SRichard Henderson cpu->mp_affinity = mpidr & ARM64_AFFINITY_MASK;
1955de3c9601SRichard Henderson
1956de3c9601SRichard Henderson return kvm_arm_init_cpreg_list(cpu);
1957de3c9601SRichard Henderson }
1958de3c9601SRichard Henderson
1959de3c9601SRichard Henderson int kvm_arch_destroy_vcpu(CPUState *cs)
1960de3c9601SRichard Henderson {
1961de3c9601SRichard Henderson return 0;
1962de3c9601SRichard Henderson }
1963de3c9601SRichard Henderson
1964de3c9601SRichard Henderson /* Callers must hold the BQL (Big QEMU Lock) */
1965de3c9601SRichard Henderson static void kvm_inject_arm_sea(CPUState *c)
1966de3c9601SRichard Henderson {
1967de3c9601SRichard Henderson ARMCPU *cpu = ARM_CPU(c);
1968de3c9601SRichard Henderson CPUARMState *env = &cpu->env;
1969de3c9601SRichard Henderson uint32_t esr;
1970de3c9601SRichard Henderson bool same_el;
1971de3c9601SRichard Henderson
1972de3c9601SRichard Henderson c->exception_index = EXCP_DATA_ABORT;
1973de3c9601SRichard Henderson env->exception.target_el = 1;
1974de3c9601SRichard Henderson
1975de3c9601SRichard Henderson /*
1976de3c9601SRichard Henderson * Set the DFSC to synchronous external abort and set FnV to not valid,
1977de3c9601SRichard Henderson * this will tell guest the FAR_ELx is UNKNOWN for this abort.
1978de3c9601SRichard Henderson */
1979de3c9601SRichard Henderson same_el = arm_current_el(env) == env->exception.target_el;
1980de3c9601SRichard Henderson esr = syn_data_abort_no_iss(same_el, 1, 0, 0, 0, 0, 0x10);
1981de3c9601SRichard Henderson
1982de3c9601SRichard Henderson env->exception.syndrome = esr;
1983de3c9601SRichard Henderson
1984de3c9601SRichard Henderson arm_cpu_do_interrupt(c);
1985de3c9601SRichard Henderson }
1986de3c9601SRichard Henderson
1987de3c9601SRichard Henderson #define AARCH64_CORE_REG(x) (KVM_REG_ARM64 | KVM_REG_SIZE_U64 | \
1988de3c9601SRichard Henderson KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(x))
1989de3c9601SRichard Henderson
1990de3c9601SRichard Henderson #define AARCH64_SIMD_CORE_REG(x) (KVM_REG_ARM64 | KVM_REG_SIZE_U128 | \
1991de3c9601SRichard Henderson KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(x))
1992de3c9601SRichard Henderson
1993de3c9601SRichard Henderson #define AARCH64_SIMD_CTRL_REG(x) (KVM_REG_ARM64 | KVM_REG_SIZE_U32 | \
1994de3c9601SRichard Henderson KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(x))
1995de3c9601SRichard Henderson
1996de3c9601SRichard Henderson static int kvm_arch_put_fpsimd(CPUState *cs)
1997de3c9601SRichard Henderson {
1998de3c9601SRichard Henderson CPUARMState *env = &ARM_CPU(cs)->env;
1999de3c9601SRichard Henderson int i, ret;
2000de3c9601SRichard Henderson
2001de3c9601SRichard Henderson for (i = 0; i < 32; i++) {
2002de3c9601SRichard Henderson uint64_t *q = aa64_vfp_qreg(env, i);
2003de3c9601SRichard Henderson #if HOST_BIG_ENDIAN
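/*
 * QEMU stores the two 64-bit halves least-significant first, while
 * the kernel side expects a native 128-bit value; on a big-endian
 * host the halves must therefore be swapped before the write.
 */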
2004de3c9601SRichard Henderson uint64_t fp_val[2] = { q[1], q[0] };
2005de3c9601SRichard Henderson ret = kvm_set_one_reg(cs, AARCH64_SIMD_CORE_REG(fp_regs.vregs[i]),
2006de3c9601SRichard Henderson fp_val);
2007de3c9601SRichard Henderson #else
2008de3c9601SRichard Henderson ret = kvm_set_one_reg(cs, AARCH64_SIMD_CORE_REG(fp_regs.vregs[i]), q);
2009de3c9601SRichard Henderson #endif
2010de3c9601SRichard Henderson if (ret) {
2011de3c9601SRichard Henderson return ret;
2012de3c9601SRichard Henderson }
2013de3c9601SRichard Henderson }
2014de3c9601SRichard Henderson
2015de3c9601SRichard Henderson return 0;
2016de3c9601SRichard Henderson }
2017de3c9601SRichard Henderson
2018de3c9601SRichard Henderson /*
2019de3c9601SRichard Henderson * KVM SVE registers come in slices where ZREGs have a slice size of 2048 bits
2020de3c9601SRichard Henderson * and PREGS and the FFR have a slice size of 256 bits. However we simply hard
2021de3c9601SRichard Henderson * code the slice index to zero for now as it's unlikely we'll need more than
2022de3c9601SRichard Henderson * one slice for quite some time.
2023de3c9601SRichard Henderson */
2024de3c9601SRichard Henderson static int kvm_arch_put_sve(CPUState *cs)
2025de3c9601SRichard Henderson {
2026de3c9601SRichard Henderson ARMCPU *cpu = ARM_CPU(cs);
2027de3c9601SRichard Henderson CPUARMState *env = &cpu->env;
2028de3c9601SRichard Henderson uint64_t tmp[ARM_MAX_VQ * 2];
2029de3c9601SRichard Henderson uint64_t *r;
2030de3c9601SRichard Henderson int n, ret;
2031de3c9601SRichard Henderson
2032de3c9601SRichard Henderson for (n = 0; n < KVM_ARM64_SVE_NUM_ZREGS; ++n) {
2033de3c9601SRichard Henderson r = sve_bswap64(tmp, &env->vfp.zregs[n].d[0], cpu->sve_max_vq * 2);
2034de3c9601SRichard Henderson ret = kvm_set_one_reg(cs, KVM_REG_ARM64_SVE_ZREG(n, 0), r);
2035de3c9601SRichard Henderson if (ret) {
2036de3c9601SRichard Henderson return ret;
2037de3c9601SRichard Henderson }
2038de3c9601SRichard Henderson }
2039de3c9601SRichard Henderson
2040de3c9601SRichard Henderson for (n = 0; n < KVM_ARM64_SVE_NUM_PREGS; ++n) {
2041de3c9601SRichard Henderson r = sve_bswap64(tmp, &env->vfp.pregs[n].p[0],
2042de3c9601SRichard Henderson DIV_ROUND_UP(cpu->sve_max_vq * 2, 8));
2043de3c9601SRichard Henderson ret = kvm_set_one_reg(cs, KVM_REG_ARM64_SVE_PREG(n, 0), r);
2044de3c9601SRichard Henderson if (ret) {
2045de3c9601SRichard Henderson return ret;
2046de3c9601SRichard Henderson }
2047de3c9601SRichard Henderson }
2048de3c9601SRichard Henderson
2049de3c9601SRichard Henderson r = sve_bswap64(tmp, &env->vfp.pregs[FFR_PRED_NUM].p[0],
2050de3c9601SRichard Henderson DIV_ROUND_UP(cpu->sve_max_vq * 2, 8));
2051de3c9601SRichard Henderson ret = kvm_set_one_reg(cs, KVM_REG_ARM64_SVE_FFR(0), r);
2052de3c9601SRichard Henderson if (ret) {
2053de3c9601SRichard Henderson return ret;
2054de3c9601SRichard Henderson }
2055de3c9601SRichard Henderson
2056de3c9601SRichard Henderson return 0;
2057de3c9601SRichard Henderson }
2058de3c9601SRichard Henderson
2059a1676bb3SJulia Suvorova int kvm_arch_put_registers(CPUState *cs, int level, Error **errp)
2060de3c9601SRichard Henderson {
2061de3c9601SRichard Henderson uint64_t val;
2062de3c9601SRichard Henderson uint32_t fpr;
2063de3c9601SRichard Henderson int i, ret;
2064de3c9601SRichard Henderson unsigned int el;
2065de3c9601SRichard Henderson
2066de3c9601SRichard Henderson ARMCPU *cpu = ARM_CPU(cs);
2067de3c9601SRichard Henderson CPUARMState *env = &cpu->env;
2068de3c9601SRichard Henderson
2069de3c9601SRichard Henderson /* If we are in AArch32 mode then we need to copy the AArch32 regs to the
2070de3c9601SRichard Henderson * AArch64 registers before pushing them out to 64-bit KVM.
2071de3c9601SRichard Henderson */
2072de3c9601SRichard Henderson if (!is_a64(env)) {
2073de3c9601SRichard Henderson aarch64_sync_32_to_64(env);
2074de3c9601SRichard Henderson }
2075de3c9601SRichard Henderson
2076de3c9601SRichard Henderson for (i = 0; i < 31; i++) {
2077de3c9601SRichard Henderson ret = kvm_set_one_reg(cs, AARCH64_CORE_REG(regs.regs[i]),
2078de3c9601SRichard Henderson &env->xregs[i]);
2079de3c9601SRichard Henderson if (ret) {
2080de3c9601SRichard Henderson return ret;
2081de3c9601SRichard Henderson }
2082de3c9601SRichard Henderson }
2083de3c9601SRichard Henderson
2084de3c9601SRichard Henderson /* KVM puts SP_EL0 in regs.sp and SP_EL1 in regs.sp_el1. On the
2085de3c9601SRichard Henderson * QEMU side we keep the current SP in xregs[31] as well.
2086de3c9601SRichard Henderson */
2087de3c9601SRichard Henderson aarch64_save_sp(env, 1);
2088de3c9601SRichard Henderson
2089de3c9601SRichard Henderson ret = kvm_set_one_reg(cs, AARCH64_CORE_REG(regs.sp), &env->sp_el[0]);
2090de3c9601SRichard Henderson if (ret) {
2091de3c9601SRichard Henderson return ret;
2092de3c9601SRichard Henderson }
2093de3c9601SRichard Henderson
2094de3c9601SRichard Henderson ret = kvm_set_one_reg(cs, AARCH64_CORE_REG(sp_el1), &env->sp_el[1]);
2095de3c9601SRichard Henderson if (ret) {
2096de3c9601SRichard Henderson return ret;
2097de3c9601SRichard Henderson }
2098de3c9601SRichard Henderson
2099de3c9601SRichard Henderson /* Note that KVM thinks pstate is 64 bit but we use a uint32_t */
2100de3c9601SRichard Henderson if (is_a64(env)) {
2101de3c9601SRichard Henderson val = pstate_read(env);
2102de3c9601SRichard Henderson } else {
2103de3c9601SRichard Henderson val = cpsr_read(env);
2104de3c9601SRichard Henderson }
2105de3c9601SRichard Henderson ret = kvm_set_one_reg(cs, AARCH64_CORE_REG(regs.pstate), &val);
2106de3c9601SRichard Henderson if (ret) {
2107de3c9601SRichard Henderson return ret;
2108de3c9601SRichard Henderson }
2109de3c9601SRichard Henderson
2110de3c9601SRichard Henderson ret = kvm_set_one_reg(cs, AARCH64_CORE_REG(regs.pc), &env->pc);
2111de3c9601SRichard Henderson if (ret) {
2112de3c9601SRichard Henderson return ret;
2113de3c9601SRichard Henderson }
2114de3c9601SRichard Henderson
2115de3c9601SRichard Henderson ret = kvm_set_one_reg(cs, AARCH64_CORE_REG(elr_el1), &env->elr_el[1]);
2116de3c9601SRichard Henderson if (ret) {
2117de3c9601SRichard Henderson return ret;
2118de3c9601SRichard Henderson }
2119de3c9601SRichard Henderson
2120de3c9601SRichard Henderson /* Saved Program State Registers
2121de3c9601SRichard Henderson *
2122de3c9601SRichard Henderson * Before we restore from the banked_spsr[] array we need to
2123de3c9601SRichard Henderson * ensure that any modifications to env->spsr are correctly
2124de3c9601SRichard Henderson * reflected in the banks.
2125de3c9601SRichard Henderson */
2126de3c9601SRichard Henderson el = arm_current_el(env);
2127de3c9601SRichard Henderson if (el > 0 && !is_a64(env)) {
2128de3c9601SRichard Henderson i = bank_number(env->uncached_cpsr & CPSR_M);
2129de3c9601SRichard Henderson env->banked_spsr[i] = env->spsr;
2130de3c9601SRichard Henderson }
2131de3c9601SRichard Henderson
2132de3c9601SRichard Henderson /* KVM SPSRs 0-4 map to QEMU banks 1-5 */
2133de3c9601SRichard Henderson for (i = 0; i < KVM_NR_SPSR; i++) {
2134de3c9601SRichard Henderson ret = kvm_set_one_reg(cs, AARCH64_CORE_REG(spsr[i]),
2135de3c9601SRichard Henderson &env->banked_spsr[i + 1]);
2136de3c9601SRichard Henderson if (ret) {
2137de3c9601SRichard Henderson return ret;
2138de3c9601SRichard Henderson }
2139de3c9601SRichard Henderson }
2140de3c9601SRichard Henderson
2141de3c9601SRichard Henderson if (cpu_isar_feature(aa64_sve, cpu)) {
2142de3c9601SRichard Henderson ret = kvm_arch_put_sve(cs);
2143de3c9601SRichard Henderson } else {
2144de3c9601SRichard Henderson ret = kvm_arch_put_fpsimd(cs);
2145de3c9601SRichard Henderson }
2146de3c9601SRichard Henderson if (ret) {
2147de3c9601SRichard Henderson return ret;
2148de3c9601SRichard Henderson }
2149de3c9601SRichard Henderson
2150de3c9601SRichard Henderson fpr = vfp_get_fpsr(env);
2151de3c9601SRichard Henderson ret = kvm_set_one_reg(cs, AARCH64_SIMD_CTRL_REG(fp_regs.fpsr), &fpr);
2152de3c9601SRichard Henderson if (ret) {
2153de3c9601SRichard Henderson return ret;
2154de3c9601SRichard Henderson }
2155de3c9601SRichard Henderson
2156de3c9601SRichard Henderson fpr = vfp_get_fpcr(env);
2157de3c9601SRichard Henderson ret = kvm_set_one_reg(cs, AARCH64_SIMD_CTRL_REG(fp_regs.fpcr), &fpr);
2158de3c9601SRichard Henderson if (ret) {
2159de3c9601SRichard Henderson return ret;
2160de3c9601SRichard Henderson }
2161de3c9601SRichard Henderson
2162de3c9601SRichard Henderson write_cpustate_to_list(cpu, true);
2163de3c9601SRichard Henderson
2164de3c9601SRichard Henderson if (!write_list_to_kvmstate(cpu, level)) {
2165de3c9601SRichard Henderson return -EINVAL;
2166de3c9601SRichard Henderson }
2167de3c9601SRichard Henderson
2168de3c9601SRichard Henderson /*
2169de3c9601SRichard Henderson * Setting VCPU events should be triggered after syncing the registers
2170de3c9601SRichard Henderson * to avoid overwriting potential changes made by KVM upon calling
2171de3c9601SRichard Henderson * KVM_SET_VCPU_EVENTS ioctl
2172de3c9601SRichard Henderson */
2173de3c9601SRichard Henderson ret = kvm_put_vcpu_events(cpu);
2174de3c9601SRichard Henderson if (ret) {
2175de3c9601SRichard Henderson return ret;
2176de3c9601SRichard Henderson }
2177de3c9601SRichard Henderson
217871c34911SRichard Henderson return kvm_arm_sync_mpstate_to_kvm(cpu);
2179de3c9601SRichard Henderson }
2180de3c9601SRichard Henderson
2181de3c9601SRichard Henderson static int kvm_arch_get_fpsimd(CPUState *cs)
2182de3c9601SRichard Henderson {
2183de3c9601SRichard Henderson CPUARMState *env = &ARM_CPU(cs)->env;
2184de3c9601SRichard Henderson int i, ret;
2185de3c9601SRichard Henderson
2186de3c9601SRichard Henderson for (i = 0; i < 32; i++) {
2187de3c9601SRichard Henderson uint64_t *q = aa64_vfp_qreg(env, i);
2188de3c9601SRichard Henderson ret = kvm_get_one_reg(cs, AARCH64_SIMD_CORE_REG(fp_regs.vregs[i]), q);
2189de3c9601SRichard Henderson if (ret) {
2190de3c9601SRichard Henderson return ret;
2191de3c9601SRichard Henderson } else {
2192de3c9601SRichard Henderson #if HOST_BIG_ENDIAN
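            /*
             * QEMU keeps each Qn as two host-endian 64-bit words with
             * the least-significant half first; swap the halves the
             * kernel returned so q[0] is the low half again.
             */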
2193de3c9601SRichard Henderson uint64_t t;
2194de3c9601SRichard Henderson t = q[0], q[0] = q[1], q[1] = t;
2195de3c9601SRichard Henderson #endif
2196de3c9601SRichard Henderson }
2197de3c9601SRichard Henderson }
2198de3c9601SRichard Henderson
2199de3c9601SRichard Henderson return 0;
2200de3c9601SRichard Henderson }
2201de3c9601SRichard Henderson
2202de3c9601SRichard Henderson /*
2203de3c9601SRichard Henderson * KVM SVE registers come in slices where ZREGs have a slice size of 2048 bits
2204de3c9601SRichard Henderson * and PREGS and the FFR have a slice size of 256 bits. However we simply hard
2205de3c9601SRichard Henderson * code the slice index to zero for now as it's unlikely we'll need more than
2206de3c9601SRichard Henderson * one slice for quite some time.
2207de3c9601SRichard Henderson */
2208de3c9601SRichard Henderson static int kvm_arch_get_sve(CPUState *cs)
2209de3c9601SRichard Henderson {
2210de3c9601SRichard Henderson ARMCPU *cpu = ARM_CPU(cs);
2211de3c9601SRichard Henderson CPUARMState *env = &cpu->env;
2212de3c9601SRichard Henderson uint64_t *r;
2213de3c9601SRichard Henderson int n, ret;
2214de3c9601SRichard Henderson
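    /*
     * Unlike the put path there is no scratch buffer: each register is
     * read straight into env and then byte-swapped in place on
     * big-endian hosts (sve_bswap64() is a no-op otherwise).
     */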
2215de3c9601SRichard Henderson for (n = 0; n < KVM_ARM64_SVE_NUM_ZREGS; ++n) {
2216de3c9601SRichard Henderson r = &env->vfp.zregs[n].d[0];
2217de3c9601SRichard Henderson ret = kvm_get_one_reg(cs, KVM_REG_ARM64_SVE_ZREG(n, 0), r);
2218de3c9601SRichard Henderson if (ret) {
2219de3c9601SRichard Henderson return ret;
2220de3c9601SRichard Henderson }
2221de3c9601SRichard Henderson sve_bswap64(r, r, cpu->sve_max_vq * 2);
2222de3c9601SRichard Henderson }
2223de3c9601SRichard Henderson
2224de3c9601SRichard Henderson for (n = 0; n < KVM_ARM64_SVE_NUM_PREGS; ++n) {
2225de3c9601SRichard Henderson r = &env->vfp.pregs[n].p[0];
2226de3c9601SRichard Henderson ret = kvm_get_one_reg(cs, KVM_REG_ARM64_SVE_PREG(n, 0), r);
2227de3c9601SRichard Henderson if (ret) {
2228de3c9601SRichard Henderson return ret;
2229de3c9601SRichard Henderson }
2230de3c9601SRichard Henderson sve_bswap64(r, r, DIV_ROUND_UP(cpu->sve_max_vq * 2, 8));
2231de3c9601SRichard Henderson }
2232de3c9601SRichard Henderson
2233de3c9601SRichard Henderson r = &env->vfp.pregs[FFR_PRED_NUM].p[0];
2234de3c9601SRichard Henderson ret = kvm_get_one_reg(cs, KVM_REG_ARM64_SVE_FFR(0), r);
2235de3c9601SRichard Henderson if (ret) {
2236de3c9601SRichard Henderson return ret;
2237de3c9601SRichard Henderson }
2238de3c9601SRichard Henderson sve_bswap64(r, r, DIV_ROUND_UP(cpu->sve_max_vq * 2, 8));
2239de3c9601SRichard Henderson
2240de3c9601SRichard Henderson return 0;
2241de3c9601SRichard Henderson }
2242de3c9601SRichard Henderson
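/*
 * Pull CPU state from KVM back into QEMU.  The order mirrors
 * kvm_arch_put_registers(): core registers first, then FP/SVE state,
 * vCPU events and finally the indexed register list.
 */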
2243a1676bb3SJulia Suvorova int kvm_arch_get_registers(CPUState *cs, Error **errp)
2244de3c9601SRichard Henderson {
2245de3c9601SRichard Henderson uint64_t val;
2246de3c9601SRichard Henderson unsigned int el;
2247de3c9601SRichard Henderson uint32_t fpr;
2248de3c9601SRichard Henderson int i, ret;
2249de3c9601SRichard Henderson
2250de3c9601SRichard Henderson ARMCPU *cpu = ARM_CPU(cs);
2251de3c9601SRichard Henderson CPUARMState *env = &cpu->env;
2252de3c9601SRichard Henderson
2253de3c9601SRichard Henderson for (i = 0; i < 31; i++) {
2254de3c9601SRichard Henderson ret = kvm_get_one_reg(cs, AARCH64_CORE_REG(regs.regs[i]),
2255de3c9601SRichard Henderson &env->xregs[i]);
2256de3c9601SRichard Henderson if (ret) {
2257de3c9601SRichard Henderson return ret;
2258de3c9601SRichard Henderson }
2259de3c9601SRichard Henderson }
2260de3c9601SRichard Henderson
2261de3c9601SRichard Henderson ret = kvm_get_one_reg(cs, AARCH64_CORE_REG(regs.sp), &env->sp_el[0]);
2262de3c9601SRichard Henderson if (ret) {
2263de3c9601SRichard Henderson return ret;
2264de3c9601SRichard Henderson }
2265de3c9601SRichard Henderson
2266de3c9601SRichard Henderson ret = kvm_get_one_reg(cs, AARCH64_CORE_REG(sp_el1), &env->sp_el[1]);
2267de3c9601SRichard Henderson if (ret) {
2268de3c9601SRichard Henderson return ret;
2269de3c9601SRichard Henderson }
2270de3c9601SRichard Henderson
2271de3c9601SRichard Henderson ret = kvm_get_one_reg(cs, AARCH64_CORE_REG(regs.pstate), &val);
2272de3c9601SRichard Henderson if (ret) {
2273de3c9601SRichard Henderson return ret;
2274de3c9601SRichard Henderson }
2275de3c9601SRichard Henderson
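    /*
     * PSTATE.nRW is clear when the vCPU is in AArch64 state and set in
     * AArch32 state; use it to pick the right write-back accessor below.
     */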
2276de3c9601SRichard Henderson env->aarch64 = ((val & PSTATE_nRW) == 0);
2277de3c9601SRichard Henderson if (is_a64(env)) {
2278de3c9601SRichard Henderson pstate_write(env, val);
2279de3c9601SRichard Henderson } else {
2280de3c9601SRichard Henderson cpsr_write(env, val, 0xffffffff, CPSRWriteRaw);
2281de3c9601SRichard Henderson }
2282de3c9601SRichard Henderson
2283de3c9601SRichard Henderson /* KVM puts SP_EL0 in regs.sp and SP_EL1 in regs.sp_el1. On the
2284de3c9601SRichard Henderson * QEMU side we keep the current SP in xregs[31] as well.
2285de3c9601SRichard Henderson */
2286de3c9601SRichard Henderson aarch64_restore_sp(env, 1);
2287de3c9601SRichard Henderson
2288de3c9601SRichard Henderson ret = kvm_get_one_reg(cs, AARCH64_CORE_REG(regs.pc), &env->pc);
2289de3c9601SRichard Henderson if (ret) {
2290de3c9601SRichard Henderson return ret;
2291de3c9601SRichard Henderson }
2292de3c9601SRichard Henderson
2293de3c9601SRichard Henderson /* If we are in AArch32 mode then we need to sync the AArch32 regs with the
2294de3c9601SRichard Henderson * incoming AArch64 regs received from 64-bit KVM.
2295de3c9601SRichard Henderson * We must perform this after all of the registers have been acquired from
2296de3c9601SRichard Henderson * the kernel.
2297de3c9601SRichard Henderson */
2298de3c9601SRichard Henderson if (!is_a64(env)) {
2299de3c9601SRichard Henderson aarch64_sync_64_to_32(env);
2300de3c9601SRichard Henderson }
2301de3c9601SRichard Henderson
2302de3c9601SRichard Henderson ret = kvm_get_one_reg(cs, AARCH64_CORE_REG(elr_el1), &env->elr_el[1]);
2303de3c9601SRichard Henderson if (ret) {
2304de3c9601SRichard Henderson return ret;
2305de3c9601SRichard Henderson }
2306de3c9601SRichard Henderson
2307de3c9601SRichard Henderson /* Fetch the SPSR registers
2308de3c9601SRichard Henderson *
2309de3c9601SRichard Henderson * KVM SPSRs 0-4 map to QEMU banks 1-5
2310de3c9601SRichard Henderson */
2311de3c9601SRichard Henderson for (i = 0; i < KVM_NR_SPSR; i++) {
2312de3c9601SRichard Henderson ret = kvm_get_one_reg(cs, AARCH64_CORE_REG(spsr[i]),
2313de3c9601SRichard Henderson &env->banked_spsr[i + 1]);
2314de3c9601SRichard Henderson if (ret) {
2315de3c9601SRichard Henderson return ret;
2316de3c9601SRichard Henderson }
2317de3c9601SRichard Henderson }
2318de3c9601SRichard Henderson
2319de3c9601SRichard Henderson el = arm_current_el(env);
2320de3c9601SRichard Henderson if (el > 0 && !is_a64(env)) {
2321de3c9601SRichard Henderson i = bank_number(env->uncached_cpsr & CPSR_M);
2322de3c9601SRichard Henderson env->spsr = env->banked_spsr[i];
2323de3c9601SRichard Henderson }
2324de3c9601SRichard Henderson
2325de3c9601SRichard Henderson if (cpu_isar_feature(aa64_sve, cpu)) {
2326de3c9601SRichard Henderson ret = kvm_arch_get_sve(cs);
2327de3c9601SRichard Henderson } else {
2328de3c9601SRichard Henderson ret = kvm_arch_get_fpsimd(cs);
2329de3c9601SRichard Henderson }
2330de3c9601SRichard Henderson if (ret) {
2331de3c9601SRichard Henderson return ret;
2332de3c9601SRichard Henderson }
2333de3c9601SRichard Henderson
2334de3c9601SRichard Henderson ret = kvm_get_one_reg(cs, AARCH64_SIMD_CTRL_REG(fp_regs.fpsr), &fpr);
2335de3c9601SRichard Henderson if (ret) {
2336de3c9601SRichard Henderson return ret;
2337de3c9601SRichard Henderson }
2338de3c9601SRichard Henderson vfp_set_fpsr(env, fpr);
2339de3c9601SRichard Henderson
2340de3c9601SRichard Henderson ret = kvm_get_one_reg(cs, AARCH64_SIMD_CTRL_REG(fp_regs.fpcr), &fpr);
2341de3c9601SRichard Henderson if (ret) {
2342de3c9601SRichard Henderson return ret;
2343de3c9601SRichard Henderson }
2344de3c9601SRichard Henderson vfp_set_fpcr(env, fpr);
2345de3c9601SRichard Henderson
2346de3c9601SRichard Henderson ret = kvm_get_vcpu_events(cpu);
2347de3c9601SRichard Henderson if (ret) {
2348de3c9601SRichard Henderson return ret;
2349de3c9601SRichard Henderson }
2350de3c9601SRichard Henderson
2351de3c9601SRichard Henderson if (!write_kvmstate_to_list(cpu)) {
2352de3c9601SRichard Henderson return -EINVAL;
2353de3c9601SRichard Henderson }
2354de3c9601SRichard Henderson /* Note that it's OK to have registers which aren't in CPUState,
2355de3c9601SRichard Henderson * so we can ignore a failure return here.
2356de3c9601SRichard Henderson */
2357de3c9601SRichard Henderson write_list_to_cpustate(cpu);
2358de3c9601SRichard Henderson
235971c34911SRichard Henderson ret = kvm_arm_sync_mpstate_to_qemu(cpu);
2360de3c9601SRichard Henderson
2361de3c9601SRichard Henderson /* TODO: other registers */
2362de3c9601SRichard Henderson return ret;
2363de3c9601SRichard Henderson }
2364de3c9601SRichard Henderson
2365de3c9601SRichard Henderson void kvm_arch_on_sigbus_vcpu(CPUState *c, int code, void *addr)
2366de3c9601SRichard Henderson {
2367de3c9601SRichard Henderson ram_addr_t ram_addr;
2368de3c9601SRichard Henderson hwaddr paddr;
2369de3c9601SRichard Henderson
2370de3c9601SRichard Henderson assert(code == BUS_MCEERR_AR || code == BUS_MCEERR_AO);
2371de3c9601SRichard Henderson
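    /*
     * BUS_MCEERR_AR means the poisoned memory was consumed (action
     * required); BUS_MCEERR_AO means the error was detected
     * asynchronously (action optional).
     */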
2372de3c9601SRichard Henderson if (acpi_ghes_present() && addr) {
2373de3c9601SRichard Henderson ram_addr = qemu_ram_addr_from_host(addr);
2374de3c9601SRichard Henderson if (ram_addr != RAM_ADDR_INVALID &&
2375de3c9601SRichard Henderson kvm_physical_memory_addr_from_host(c->kvm_state, addr, &paddr)) {
2376de3c9601SRichard Henderson kvm_hwpoison_page_add(ram_addr);
2377de3c9601SRichard Henderson /*
2378de3c9601SRichard Henderson * If this is a BUS_MCEERR_AR, we know we have been called
2379de3c9601SRichard Henderson * synchronously from the vCPU thread, so we can easily
2380de3c9601SRichard Henderson * synchronize the state and inject an error.
2381de3c9601SRichard Henderson *
2382de3c9601SRichard Henderson * TODO: we currently don't tell the guest at all about
2383de3c9601SRichard Henderson * BUS_MCEERR_AO. In that case we might either be being
2384de3c9601SRichard Henderson * called synchronously from the vCPU thread, or a bit
2385de3c9601SRichard Henderson * later from the main thread, so doing the injection of
2386de3c9601SRichard Henderson * the error would be more complicated.
2387de3c9601SRichard Henderson */
2388de3c9601SRichard Henderson if (code == BUS_MCEERR_AR) {
2389de3c9601SRichard Henderson kvm_cpu_synchronize_state(c);
2390de3c9601SRichard Henderson if (!acpi_ghes_record_errors(ACPI_HEST_SRC_ID_SEA, paddr)) {
2391de3c9601SRichard Henderson kvm_inject_arm_sea(c);
2392de3c9601SRichard Henderson } else {
2393de3c9601SRichard Henderson error_report("failed to record the error");
2394de3c9601SRichard Henderson abort();
2395de3c9601SRichard Henderson }
2396de3c9601SRichard Henderson }
2397de3c9601SRichard Henderson return;
2398de3c9601SRichard Henderson }
2399de3c9601SRichard Henderson if (code == BUS_MCEERR_AO) {
2400de3c9601SRichard Henderson error_report("Hardware memory error at addr %p for memory used by "
2401de3c9601SRichard Henderson "QEMU itself instead of guest system!", addr);
2402de3c9601SRichard Henderson }
2403de3c9601SRichard Henderson }
2404de3c9601SRichard Henderson
2405de3c9601SRichard Henderson if (code == BUS_MCEERR_AR) {
2406de3c9601SRichard Henderson error_report("Hardware memory error!");
2407de3c9601SRichard Henderson exit(1);
2408de3c9601SRichard Henderson }
2409de3c9601SRichard Henderson }
2410de3c9601SRichard Henderson
2411de3c9601SRichard Henderson /* C6.6.29 BRK instruction */
2412de3c9601SRichard Henderson static const uint32_t brk_insn = 0xd4200000;
2413de3c9601SRichard Henderson
2414de3c9601SRichard Henderson int kvm_arch_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
2415de3c9601SRichard Henderson {
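    /*
     * Save the original instruction, then patch a BRK #0 (see brk_insn
     * above) over it; either guest-memory access failing is reported
     * as -EINVAL.
     */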
2416de3c9601SRichard Henderson if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 4, 0) ||
2417de3c9601SRichard Henderson cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&brk_insn, 4, 1)) {
2418de3c9601SRichard Henderson return -EINVAL;
2419de3c9601SRichard Henderson }
2420de3c9601SRichard Henderson return 0;
2421de3c9601SRichard Henderson }
2422de3c9601SRichard Henderson
2423de3c9601SRichard Henderson int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
2424de3c9601SRichard Henderson {
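    /*
     * Only restore the saved instruction if our BRK is still present;
     * if the guest rewrote the site, fail rather than clobber it.
     */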
2425de3c9601SRichard Henderson static uint32_t brk;
2426de3c9601SRichard Henderson
2427de3c9601SRichard Henderson if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&brk, 4, 0) ||
2428de3c9601SRichard Henderson brk != brk_insn ||
2429de3c9601SRichard Henderson cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 4, 1)) {
2430de3c9601SRichard Henderson return -EINVAL;
2431de3c9601SRichard Henderson }
2432de3c9601SRichard Henderson return 0;
2433de3c9601SRichard Henderson }
2434918d0de0SCornelia Huck
2435918d0de0SCornelia Huck void kvm_arm_enable_mte(Object *cpuobj, Error **errp)
2436918d0de0SCornelia Huck {
2437918d0de0SCornelia Huck static bool tried_to_enable;
2438918d0de0SCornelia Huck static bool succeeded_to_enable;
2439918d0de0SCornelia Huck Error *mte_migration_blocker = NULL;
2440918d0de0SCornelia Huck ARMCPU *cpu = ARM_CPU(cpuobj);
2441918d0de0SCornelia Huck int ret;
2442918d0de0SCornelia Huck
2443918d0de0SCornelia Huck if (!tried_to_enable) {
2444918d0de0SCornelia Huck /*
2445918d0de0SCornelia Huck * MTE on KVM is enabled on a per-VM basis (and retrying doesn't make
2446918d0de0SCornelia Huck * sense), and we only want a single migration blocker as well.
2447918d0de0SCornelia Huck */
2448918d0de0SCornelia Huck tried_to_enable = true;
2449918d0de0SCornelia Huck
2450918d0de0SCornelia Huck ret = kvm_vm_enable_cap(kvm_state, KVM_CAP_ARM_MTE, 0);
2451918d0de0SCornelia Huck if (ret) {
2452918d0de0SCornelia Huck error_setg_errno(errp, -ret, "Failed to enable KVM_CAP_ARM_MTE");
2453918d0de0SCornelia Huck return;
2454918d0de0SCornelia Huck }
2455918d0de0SCornelia Huck
2456918d0de0SCornelia Huck /* TODO: Add migration support with MTE enabled */
2457918d0de0SCornelia Huck error_setg(&mte_migration_blocker,
2458918d0de0SCornelia Huck "Live migration disabled due to MTE enabled");
2459918d0de0SCornelia Huck if (migrate_add_blocker(&mte_migration_blocker, errp)) {
2460918d0de0SCornelia Huck error_free(mte_migration_blocker);
2461918d0de0SCornelia Huck return;
2462918d0de0SCornelia Huck }
2463918d0de0SCornelia Huck
2464918d0de0SCornelia Huck succeeded_to_enable = true;
2465918d0de0SCornelia Huck }
2466918d0de0SCornelia Huck
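    /* Only flag the vCPU as MTE-capable if the VM-wide cap went through. */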
2467918d0de0SCornelia Huck if (succeeded_to_enable) {
2468918d0de0SCornelia Huck cpu->kvm_mte = true;
2469918d0de0SCornelia Huck }
2470918d0de0SCornelia Huck }