1 /*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * KVM/MIPS: MIPS specific KVM APIs
7 *
8 * Copyright (C) 2012-2014 Imagination Technologies Ltd.
9 * Authors: Sanjay Lal <sanjayl@kymasys.com>
10 */
11
12 #include "qemu/osdep.h"
13 #include <sys/ioctl.h>
14
15 #include <linux/kvm.h>
16
17 #include "cpu.h"
18 #include "internal.h"
19 #include "qemu/error-report.h"
20 #include "qemu/main-loop.h"
21 #include "sysemu/kvm.h"
22 #include "sysemu/kvm_int.h"
23 #include "sysemu/runstate.h"
24 #include "kvm_mips.h"
25 #include "hw/boards.h"
26 #include "fpu_helper.h"
27
/* Set to 1 to get per-callback trace output on stderr. */
#define DEBUG_KVM 0

#define DPRINTF(fmt, ...) \
    do { if (DEBUG_KVM) { fprintf(stderr, fmt, ## __VA_ARGS__); } } while (0)

/*
 * Host KVM support for the optional FPU/MSA capabilities, probed once in
 * kvm_arch_init() and cleared in kvm_arch_init_vcpu() if enabling fails.
 */
static int kvm_mips_fpu_cap;
static int kvm_mips_msa_cap;

/* No capabilities beyond the generic required set are needed on MIPS. */
const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
    KVM_CAP_LAST_INFO
};

static void kvm_mips_update_state(void *opaque, bool running, RunState state);
41
/* KVM vcpu ids map 1:1 onto QEMU's cpu_index. */
unsigned long kvm_arch_vcpu_id(CPUState *cs)
{
    return (unsigned long)cs->cpu_index;
}
46
/*
 * Architecture-wide KVM initialisation: configure the sigmask width and
 * probe the optional FPU/MSA extensions once for later vcpu setup.
 */
int kvm_arch_init(MachineState *ms, KVMState *s)
{
    /* MIPS has 128 signals, so the in-kernel sigmask is 16 bytes wide. */
    kvm_set_sigmask_len(s, 16);

    /* Cache host support for the optional capabilities (order-independent). */
    kvm_mips_msa_cap = kvm_check_extension(s, KVM_CAP_MIPS_MSA);
    kvm_mips_fpu_cap = kvm_check_extension(s, KVM_CAP_MIPS_FPU);

    DPRINTF("%s\n", __func__);
    return 0;
}
58
int kvm_arch_irqchip_create(KVMState *s)
{
    /*
     * Nothing arch-specific is created here; returning 0 lets the generic
     * KVM code proceed with its default irqchip handling.
     */
    return 0;
}
63
/*
 * Try to enable an optional per-vcpu KVM capability.  On failure the cached
 * capability flag is cleared so kvm_mips_reset_vcpu() disables the feature,
 * and 0 is returned so vcpu creation still succeeds.
 */
static int kvm_mips_try_enable_cap(CPUState *cs, int cap, int *cap_flag)
{
    int r = kvm_vcpu_enable_cap(cs, cap, 0, 0);

    if (r < 0) {
        /* mark unsupported so it gets disabled on reset */
        *cap_flag = 0;
        r = 0;
    }
    return r;
}

/*
 * Per-vcpu initialisation: hook VM run-state changes for timer management
 * and enable FPU/MSA in KVM when both host and guest CPU model have them.
 */
int kvm_arch_init_vcpu(CPUState *cs)
{
    CPUMIPSState *env = cpu_env(cs);
    int ret = 0;

    qemu_add_vm_change_state_handler(kvm_mips_update_state, cs);

    if (kvm_mips_fpu_cap && (env->CP0_Config1 & (1 << CP0C1_FP))) {
        ret = kvm_mips_try_enable_cap(cs, KVM_CAP_MIPS_FPU,
                                      &kvm_mips_fpu_cap);
    }

    if (kvm_mips_msa_cap && ase_msa_available(env)) {
        ret = kvm_mips_try_enable_cap(cs, KVM_CAP_MIPS_MSA,
                                      &kvm_mips_msa_cap);
    }

    DPRINTF("%s\n", __func__);
    return ret;
}
92
int kvm_arch_destroy_vcpu(CPUState *cs)
{
    /* No per-vcpu state is allocated by this backend, so nothing to free. */
    return 0;
}
97
/*
 * Strip CPU features from the guest's view that the host KVM cannot
 * virtualize (the cached capability flags are cleared in kvm_arch_init()
 * or kvm_arch_init_vcpu() when support is missing).
 */
void kvm_mips_reset_vcpu(MIPSCPU *cpu)
{
    CPUMIPSState *env = &cpu->env;

    if ((env->CP0_Config1 & (1 << CP0C1_FP)) && !kvm_mips_fpu_cap) {
        warn_report("KVM does not support FPU, disabling");
        env->CP0_Config1 &= ~(1 << CP0C1_FP);
    }

    if (ase_msa_available(env) && !kvm_mips_msa_cap) {
        warn_report("KVM does not support MSA, disabling");
        env->CP0_Config3 &= ~(1 << CP0C3_MSAP);
    }

    DPRINTF("%s\n", __func__);
}
113
int kvm_arch_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
{
    /*
     * NOTE(review): no breakpoint instruction is written here; software
     * breakpoints appear to be unimplemented for MIPS KVM — confirm.
     */
    DPRINTF("%s\n", __func__);
    return 0;
}
119
int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
{
    /* Counterpart of kvm_arch_insert_sw_breakpoint(); equally a no-op. */
    DPRINTF("%s\n", __func__);
    return 0;
}
125
/* Non-zero when the I/O interrupt (IP2) bit is set in CP0_Cause. */
static inline int cpu_mips_io_interrupts_pending(MIPSCPU *cpu)
{
    return cpu->env.CP0_Cause & (1 << (CP0Ca_IP + 2));
}
132
133
/*
 * Called before entering the guest: if QEMU has a hard interrupt raised and
 * CP0_Cause shows the I/O line pending, forward IRQ2 to the vcpu via the
 * KVM_INTERRUPT ioctl.  The BQL protects interrupt_request and CP0 state.
 */
void kvm_arch_pre_run(CPUState *cs, struct kvm_run *run)
{
    MIPSCPU *cpu = MIPS_CPU(cs);
    struct kvm_mips_interrupt intr = { .cpu = -1, .irq = 2 };
    int ret;

    bql_lock();

    if ((cs->interrupt_request & CPU_INTERRUPT_HARD) &&
        cpu_mips_io_interrupts_pending(cpu)) {
        ret = kvm_vcpu_ioctl(cs, KVM_INTERRUPT, &intr);
        if (ret < 0) {
            error_report("%s: cpu %d: failed to inject IRQ %x",
                         __func__, cs->cpu_index, intr.irq);
        }
    }

    bql_unlock();
}
155
MemTxAttrs kvm_arch_post_run(CPUState *cs, struct kvm_run *run)
{
    /* No post-exit processing; memory transactions carry no attributes. */
    return MEMTXATTRS_UNSPECIFIED;
}
160
int kvm_arch_process_async_events(CPUState *cs)
{
    /* Nothing asynchronous to handle; just report whether the vcpu halted. */
    return cs->halted;
}
165
/*
 * Handle a KVM exit that the generic code could not.  No MIPS-specific exit
 * reasons are handled in userspace, so every reason is reported and treated
 * as fatal (-1 stops the vcpu loop).
 */
int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
{
    DPRINTF("%s\n", __func__);

    switch (run->exit_reason) {
    default:
        error_report("%s: unknown exit reason %d",
                     __func__, run->exit_reason);
        break;
    }

    return -1;
}
181
bool kvm_arch_stop_on_emulation_error(CPUState *cs)
{
    /* Always stop the vcpu when KVM reports an emulation failure. */
    DPRINTF("%s\n", __func__);
    return true;
}
187
void kvm_arch_init_irq_routing(KVMState *s)
{
    /* No IRQ routing tables to set up for MIPS. */
}
191
/*
 * Raise (level != 0) or lower (level == 0) an interrupt line on the given
 * vcpu; deassertion is encoded as a negated irq number per the KVM ABI.
 */
int kvm_mips_set_interrupt(MIPSCPU *cpu, int irq, int level)
{
    CPUState *cs = CPU(cpu);
    struct kvm_mips_interrupt intr = {
        .cpu = -1,
        .irq = level ? irq : -irq,
    };

    assert(kvm_enabled());

    kvm_vcpu_ioctl(cs, KVM_INTERRUPT, &intr);

    return 0;
}
211
/*
 * Raise or lower an inter-processor interrupt on a destination vcpu.  The
 * ioctl is issued on the currently running vcpu and targets the destination
 * by cpu_index; as above, deassertion is encoded as a negated irq number.
 */
int kvm_mips_set_ipi_interrupt(MIPSCPU *cpu, int irq, int level)
{
    CPUState *self = current_cpu;
    CPUState *dest = CPU(cpu);
    struct kvm_mips_interrupt intr = {
        .cpu = dest->cpu_index,
        .irq = level ? irq : -irq,
    };

    assert(kvm_enabled());

    DPRINTF("%s: CPU %d, IRQ: %d\n", __func__, intr.cpu, intr.irq);

    kvm_vcpu_ioctl(self, KVM_INTERRUPT, &intr);

    return 0;
}
234
/*
 * Build KVM one-reg ids for CP0 registers: the low bits encode
 * 8 * register + select, combined with the register's access size flag.
 */
#define MIPS_CP0_32(_R, _S) \
    (KVM_REG_MIPS_CP0 | KVM_REG_SIZE_U32 | (8 * (_R) + (_S)))

#define MIPS_CP0_64(_R, _S) \
    (KVM_REG_MIPS_CP0 | KVM_REG_SIZE_U64 | (8 * (_R) + (_S)))

/* CP0 register ids, named by their (register, select) pair. */
#define KVM_REG_MIPS_CP0_INDEX          MIPS_CP0_32(0, 0)
#define KVM_REG_MIPS_CP0_RANDOM         MIPS_CP0_32(1, 0)
#define KVM_REG_MIPS_CP0_CONTEXT        MIPS_CP0_64(4, 0)
#define KVM_REG_MIPS_CP0_USERLOCAL      MIPS_CP0_64(4, 2)
#define KVM_REG_MIPS_CP0_PAGEMASK       MIPS_CP0_32(5, 0)
#define KVM_REG_MIPS_CP0_PAGEGRAIN      MIPS_CP0_32(5, 1)
#define KVM_REG_MIPS_CP0_PWBASE         MIPS_CP0_64(5, 5)
#define KVM_REG_MIPS_CP0_PWFIELD        MIPS_CP0_64(5, 6)
#define KVM_REG_MIPS_CP0_PWSIZE         MIPS_CP0_64(5, 7)
#define KVM_REG_MIPS_CP0_WIRED          MIPS_CP0_32(6, 0)
#define KVM_REG_MIPS_CP0_PWCTL          MIPS_CP0_32(6, 6)
#define KVM_REG_MIPS_CP0_HWRENA         MIPS_CP0_32(7, 0)
#define KVM_REG_MIPS_CP0_BADVADDR       MIPS_CP0_64(8, 0)
#define KVM_REG_MIPS_CP0_COUNT          MIPS_CP0_32(9, 0)
#define KVM_REG_MIPS_CP0_ENTRYHI        MIPS_CP0_64(10, 0)
#define KVM_REG_MIPS_CP0_COMPARE        MIPS_CP0_32(11, 0)
#define KVM_REG_MIPS_CP0_STATUS         MIPS_CP0_32(12, 0)
#define KVM_REG_MIPS_CP0_CAUSE          MIPS_CP0_32(13, 0)
#define KVM_REG_MIPS_CP0_EPC            MIPS_CP0_64(14, 0)
#define KVM_REG_MIPS_CP0_PRID           MIPS_CP0_32(15, 0)
#define KVM_REG_MIPS_CP0_EBASE          MIPS_CP0_64(15, 1)
#define KVM_REG_MIPS_CP0_CONFIG         MIPS_CP0_32(16, 0)
#define KVM_REG_MIPS_CP0_CONFIG1        MIPS_CP0_32(16, 1)
#define KVM_REG_MIPS_CP0_CONFIG2        MIPS_CP0_32(16, 2)
#define KVM_REG_MIPS_CP0_CONFIG3        MIPS_CP0_32(16, 3)
#define KVM_REG_MIPS_CP0_CONFIG4        MIPS_CP0_32(16, 4)
#define KVM_REG_MIPS_CP0_CONFIG5        MIPS_CP0_32(16, 5)
#define KVM_REG_MIPS_CP0_CONFIG6        MIPS_CP0_32(16, 6)
#define KVM_REG_MIPS_CP0_XCONTEXT       MIPS_CP0_64(20, 0)
#define KVM_REG_MIPS_CP0_ERROREPC       MIPS_CP0_64(30, 0)
#define KVM_REG_MIPS_CP0_KSCRATCH1      MIPS_CP0_64(31, 2)
#define KVM_REG_MIPS_CP0_KSCRATCH2     MIPS_CP0_64(31, 3)
#define KVM_REG_MIPS_CP0_KSCRATCH3     MIPS_CP0_64(31, 4)
#define KVM_REG_MIPS_CP0_KSCRATCH4     MIPS_CP0_64(31, 5)
#define KVM_REG_MIPS_CP0_KSCRATCH5     MIPS_CP0_64(31, 6)
#define KVM_REG_MIPS_CP0_KSCRATCH6     MIPS_CP0_64(31, 7)
277
/* Write a signed 32-bit register to KVM via the one-reg interface. */
static inline int kvm_mips_put_one_reg(CPUState *cs, uint64_t reg_id,
                                       int32_t *addr)
{
    struct kvm_one_reg reg;

    reg.id = reg_id;
    reg.addr = (uintptr_t)addr;
    return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
}
288
/* Write an unsigned 32-bit register to KVM via the one-reg interface. */
static inline int kvm_mips_put_one_ureg(CPUState *cs, uint64_t reg_id,
                                        uint32_t *addr)
{
    struct kvm_one_reg reg;

    reg.id = reg_id;
    reg.addr = (uintptr_t)addr;
    return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
}
299
/*
 * Write a target_ulong register to KVM.  The value is widened to the 64-bit
 * wire format the kernel expects regardless of the target register width.
 */
static inline int kvm_mips_put_one_ulreg(CPUState *cs, uint64_t reg_id,
                                         target_ulong *addr)
{
    uint64_t wide = *addr;
    struct kvm_one_reg reg;

    reg.id = reg_id;
    reg.addr = (uintptr_t)&wide;
    return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
}
311
/* Write a signed 64-bit register to KVM via the one-reg interface. */
static inline int kvm_mips_put_one_reg64(CPUState *cs, uint64_t reg_id,
                                         int64_t *addr)
{
    struct kvm_one_reg reg;

    reg.id = reg_id;
    reg.addr = (uintptr_t)addr;
    return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
}
322
/* Write an unsigned 64-bit register to KVM via the one-reg interface. */
static inline int kvm_mips_put_one_ureg64(CPUState *cs, uint64_t reg_id,
                                          uint64_t *addr)
{
    struct kvm_one_reg reg;

    reg.id = reg_id;
    reg.addr = (uintptr_t)addr;
    return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
}
333
/* Read a signed 32-bit register from KVM via the one-reg interface. */
static inline int kvm_mips_get_one_reg(CPUState *cs, uint64_t reg_id,
                                       int32_t *addr)
{
    struct kvm_one_reg reg;

    reg.id = reg_id;
    reg.addr = (uintptr_t)addr;
    return kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
}
344
/* Read an unsigned 32-bit register from KVM via the one-reg interface. */
static inline int kvm_mips_get_one_ureg(CPUState *cs, uint64_t reg_id,
                                        uint32_t *addr)
{
    struct kvm_one_reg reg;

    reg.id = reg_id;
    reg.addr = (uintptr_t)addr;
    return kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
}
355
/*
 * Read a target_ulong register from KVM.  The kernel supplies a 64-bit
 * value; *addr is only updated when the ioctl succeeds, narrowing the
 * result to the target register width.
 */
static inline int kvm_mips_get_one_ulreg(CPUState *cs, uint64_t reg_id,
                                         target_ulong *addr)
{
    uint64_t wide = 0;
    struct kvm_one_reg reg;
    int err;

    reg.id = reg_id;
    reg.addr = (uintptr_t)&wide;
    err = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (err >= 0) {
        *addr = wide;
    }
    return err;
}
372
/* Read a signed 64-bit register from KVM via the one-reg interface. */
static inline int kvm_mips_get_one_reg64(CPUState *cs, uint64_t reg_id,
                                         int64_t *addr)
{
    struct kvm_one_reg reg;

    reg.id = reg_id;
    reg.addr = (uintptr_t)addr;
    return kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
}
383
/* Read an unsigned 64-bit register from KVM via the one-reg interface. */
static inline int kvm_mips_get_one_ureg64(CPUState *cs, uint64_t reg_id,
                                          uint64_t *addr)
{
    struct kvm_one_reg reg;

    reg.id = reg_id;
    reg.addr = (uintptr_t)addr;
    return kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
}
394
/*
 * Writable-bit masks for the CP0 Config registers: only these bits are
 * modified by kvm_mips_change_one_reg() when syncing Config state to KVM.
 */
#define KVM_REG_MIPS_CP0_CONFIG_MASK    (1U << CP0C0_M)
#define KVM_REG_MIPS_CP0_CONFIG1_MASK   ((1U << CP0C1_M) | \
                                         (1U << CP0C1_FP))
#define KVM_REG_MIPS_CP0_CONFIG2_MASK   (1U << CP0C2_M)
#define KVM_REG_MIPS_CP0_CONFIG3_MASK   ((1U << CP0C3_M) | \
                                         (1U << CP0C3_MSAP))
#define KVM_REG_MIPS_CP0_CONFIG4_MASK   (1U << CP0C4_M)
#define KVM_REG_MIPS_CP0_CONFIG5_MASK   ((1U << CP0C5_MSAEn) | \
                                         (1U << CP0C5_UFE) | \
                                         (1U << CP0C5_FRE) | \
                                         (1U << CP0C5_UFR))
#define KVM_REG_MIPS_CP0_CONFIG6_MASK   ((1U << CP0C6_BPPASS) | \
                                         (0x3fU << CP0C6_KPOS) | \
                                         (1U << CP0C6_KE) | \
                                         (1U << CP0C6_VTLBONLY) | \
                                         (1U << CP0C6_LASX) | \
                                         (1U << CP0C6_SSEN) | \
                                         (1U << CP0C6_DISDRTIME) | \
                                         (1U << CP0C6_PIXNUEN) | \
                                         (1U << CP0C6_SCRAND) | \
                                         (1U << CP0C6_LLEXCEN) | \
                                         (1U << CP0C6_DISVC) | \
                                         (1U << CP0C6_VCLRU) | \
                                         (1U << CP0C6_DCLRU) | \
                                         (1U << CP0C6_PIXUEN) | \
                                         (1U << CP0C6_DISBLKLYEN) | \
                                         (1U << CP0C6_UMEMUALEN) | \
                                         (1U << CP0C6_SFBEN) | \
                                         (1U << CP0C6_FLTINT) | \
                                         (1U << CP0C6_VLTINT) | \
                                         (1U << CP0C6_DISBTB) | \
                                         (3U << CP0C6_STPREFCTL) | \
                                         (1U << CP0C6_INSTPREF) | \
                                         (1U << CP0C6_DATAPREF))
429
/*
 * Read-modify-write a 32-bit register: update only the bits selected by
 * mask to match *addr, leaving all other bits as KVM currently has them.
 * Skips the write entirely when no masked bit differs.
 */
static inline int kvm_mips_change_one_reg(CPUState *cs, uint64_t reg_id,
                                          int32_t *addr, int32_t mask)
{
    int32_t curr, diff;
    int err;

    err = kvm_mips_get_one_reg(cs, reg_id, &curr);
    if (err < 0) {
        return err;
    }

    diff = (*addr ^ curr) & mask;
    if (diff == 0) {
        /* nothing to update */
        return 0;
    }

    curr ^= diff;
    return kvm_mips_put_one_reg(cs, reg_id, &curr);
}
450
451 /*
452 * We freeze the KVM timer when either the VM clock is stopped or the state is
453 * saved (the state is dirty).
454 */
455
456 /*
457 * Save the state of the KVM timer when VM clock is stopped or state is synced
458 * to QEMU.
459 */
static int kvm_mips_save_count(CPUState *cs)
{
    CPUMIPSState *env = cpu_env(cs);
    uint64_t count_ctl;
    int err, ret = 0;

    /*
     * freeze KVM timer: set COUNT_CTL.DC first so CP0_Count does not keep
     * advancing while we snapshot it below.  The timer stays frozen until
     * kvm_mips_restore_count() clears DC again.
     */
    err = kvm_mips_get_one_ureg64(cs, KVM_REG_MIPS_COUNT_CTL, &count_ctl);
    if (err < 0) {
        DPRINTF("%s: Failed to get COUNT_CTL (%d)\n", __func__, err);
        ret = err;
    } else if (!(count_ctl & KVM_REG_MIPS_COUNT_CTL_DC)) {
        count_ctl |= KVM_REG_MIPS_COUNT_CTL_DC;
        err = kvm_mips_put_one_ureg64(cs, KVM_REG_MIPS_COUNT_CTL, &count_ctl);
        if (err < 0) {
            DPRINTF("%s: Failed to set COUNT_CTL.DC=1 (%d)\n", __func__, err);
            ret = err;
        }
    }

    /* read CP0_Cause */
    err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_CAUSE, &env->CP0_Cause);
    if (err < 0) {
        DPRINTF("%s: Failed to get CP0_CAUSE (%d)\n", __func__, err);
        ret = err;
    }

    /* read CP0_Count */
    err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_COUNT, &env->CP0_Count);
    if (err < 0) {
        DPRINTF("%s: Failed to get CP0_COUNT (%d)\n", __func__, err);
        ret = err;
    }

    /* first error encountered, if any; later steps are still attempted */
    return ret;
}
496
497 /*
498 * Restore the state of the KVM timer when VM clock is restarted or state is
499 * synced to KVM.
500 */
static int kvm_mips_restore_count(CPUState *cs)
{
    CPUMIPSState *env = cpu_env(cs);
    uint64_t count_ctl;
    int err_dc, err, ret = 0;

    /*
     * check the timer is frozen; err_dc remembers whether we could read
     * COUNT_CTL at all, since resuming below is only safe if we did.
     */
    err_dc = kvm_mips_get_one_ureg64(cs, KVM_REG_MIPS_COUNT_CTL, &count_ctl);
    if (err_dc < 0) {
        DPRINTF("%s: Failed to get COUNT_CTL (%d)\n", __func__, err_dc);
        ret = err_dc;
    } else if (!(count_ctl & KVM_REG_MIPS_COUNT_CTL_DC)) {
        /* freeze timer (sets COUNT_RESUME for us) */
        count_ctl |= KVM_REG_MIPS_COUNT_CTL_DC;
        err = kvm_mips_put_one_ureg64(cs, KVM_REG_MIPS_COUNT_CTL, &count_ctl);
        if (err < 0) {
            DPRINTF("%s: Failed to set COUNT_CTL.DC=1 (%d)\n", __func__, err);
            ret = err;
        }
    }

    /* load CP0_Cause */
    err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_CAUSE, &env->CP0_Cause);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_CAUSE (%d)\n", __func__, err);
        ret = err;
    }

    /* load CP0_Count */
    err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_COUNT, &env->CP0_Count);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_COUNT (%d)\n", __func__, err);
        ret = err;
    }

    /* resume KVM timer (only if COUNT_CTL was readable above) */
    if (err_dc >= 0) {
        count_ctl &= ~KVM_REG_MIPS_COUNT_CTL_DC;
        err = kvm_mips_put_one_ureg64(cs, KVM_REG_MIPS_COUNT_CTL, &count_ctl);
        if (err < 0) {
            DPRINTF("%s: Failed to set COUNT_CTL.DC=0 (%d)\n", __func__, err);
            ret = err;
        }
    }

    return ret;
}
548
549 /*
550 * Handle the VM clock being started or stopped
551 */
static void kvm_mips_update_state(void *opaque, bool running, RunState state)
{
    CPUState *cs = opaque;
    uint64_t count_resume;
    int ret;

    /*
     * If state is already dirty (synced to QEMU) then the KVM timer state is
     * already saved and can be restored when it is synced back to KVM.
     */
    if (running) {
        /* Set clock restore time to now */
        count_resume = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
        ret = kvm_mips_put_one_ureg64(cs, KVM_REG_MIPS_COUNT_RESUME,
                                      &count_resume);
        if (ret < 0) {
            warn_report("Failed setting COUNT_RESUME");
            return;
        }

        if (!cs->vcpu_dirty) {
            if (kvm_mips_restore_count(cs) < 0) {
                warn_report("Failed restoring count");
            }
        }
    } else {
        if (!cs->vcpu_dirty) {
            if (kvm_mips_save_count(cs) < 0) {
                warn_report("Failed saving count");
            }
        }
    }
}
587
/*
 * Write FPU/MSA state from env into KVM.
 *
 * Control registers that never change at runtime (FCR_IR, MSA_IR) are only
 * written for KVM_PUT_FULL_STATE.  Plain FPRs are only written when MSA is
 * absent, since the MSA vector registers contain the FP registers as a
 * subset.  The first error is remembered in ret but the remaining
 * registers are still attempted; returns 0 on full success.
 */
static int kvm_mips_put_fpu_registers(CPUState *cs, int level)
{
    CPUMIPSState *env = cpu_env(cs);
    int err, ret = 0;
    unsigned int i;

    /* Only put FPU state if we're emulating a CPU with an FPU */
    if (env->CP0_Config1 & (1 << CP0C1_FP)) {
        /* FPU Control Registers */
        if (level == KVM_PUT_FULL_STATE) {
            err = kvm_mips_put_one_ureg(cs, KVM_REG_MIPS_FCR_IR,
                                        &env->active_fpu.fcr0);
            if (err < 0) {
                DPRINTF("%s: Failed to put FCR_IR (%d)\n", __func__, err);
                ret = err;
            }
        }
        err = kvm_mips_put_one_ureg(cs, KVM_REG_MIPS_FCR_CSR,
                                    &env->active_fpu.fcr31);
        if (err < 0) {
            DPRINTF("%s: Failed to put FCR_CSR (%d)\n", __func__, err);
            ret = err;
        }

        /*
         * FPU register state is a subset of MSA vector state, so don't put FPU
         * registers if we're emulating a CPU with MSA.
         */
        if (!ase_msa_available(env)) {
            /* Floating point registers */
            for (i = 0; i < 32; ++i) {
                if (env->CP0_Status & (1 << CP0St_FR)) {
                    err = kvm_mips_put_one_ureg64(cs, KVM_REG_MIPS_FPR_64(i),
                                                  &env->active_fpu.fpr[i].d);
                } else {
                    /*
                     * Bug fix: this is the put path, so write the 32-bit FPR
                     * with put_one_ureg.  It previously called
                     * kvm_mips_get_one_ureg, which overwrote QEMU's copy of
                     * the register instead of updating KVM's.
                     */
                    err = kvm_mips_put_one_ureg(cs, KVM_REG_MIPS_FPR_32(i),
                                        &env->active_fpu.fpr[i].w[FP_ENDIAN_IDX]);
                }
                if (err < 0) {
                    DPRINTF("%s: Failed to put FPR%u (%d)\n", __func__, i, err);
                    ret = err;
                }
            }
        }
    }

    /* Only put MSA state if we're emulating a CPU with MSA */
    if (ase_msa_available(env)) {
        /* MSA Control Registers */
        if (level == KVM_PUT_FULL_STATE) {
            err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_MSA_IR,
                                       &env->msair);
            if (err < 0) {
                DPRINTF("%s: Failed to put MSA_IR (%d)\n", __func__, err);
                ret = err;
            }
        }
        err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_MSA_CSR,
                                   &env->active_tc.msacsr);
        if (err < 0) {
            DPRINTF("%s: Failed to put MSA_CSR (%d)\n", __func__, err);
            ret = err;
        }

        /* Vector registers (includes FP registers) */
        for (i = 0; i < 32; ++i) {
            /* Big endian MSA not supported by QEMU yet anyway */
            err = kvm_mips_put_one_reg64(cs, KVM_REG_MIPS_VEC_128(i),
                                         env->active_fpu.fpr[i].wr.d);
            if (err < 0) {
                DPRINTF("%s: Failed to put VEC%u (%d)\n", __func__, i, err);
                ret = err;
            }
        }
    }

    return ret;
}
666
/*
 * Read FPU/MSA state from KVM into env.
 *
 * Mirrors kvm_mips_put_fpu_registers(): plain FPRs are only fetched when
 * MSA is absent because the vector registers contain the FP registers as a
 * subset.  The first error is remembered in ret but remaining registers
 * are still fetched; returns 0 on full success.
 */
static int kvm_mips_get_fpu_registers(CPUState *cs)
{
    CPUMIPSState *env = cpu_env(cs);
    int err, ret = 0;
    unsigned int i;

    /* Only get FPU state if we're emulating a CPU with an FPU */
    if (env->CP0_Config1 & (1 << CP0C1_FP)) {
        /* FPU Control Registers */
        err = kvm_mips_get_one_ureg(cs, KVM_REG_MIPS_FCR_IR,
                                    &env->active_fpu.fcr0);
        if (err < 0) {
            DPRINTF("%s: Failed to get FCR_IR (%d)\n", __func__, err);
            ret = err;
        }
        err = kvm_mips_get_one_ureg(cs, KVM_REG_MIPS_FCR_CSR,
                                    &env->active_fpu.fcr31);
        if (err < 0) {
            DPRINTF("%s: Failed to get FCR_CSR (%d)\n", __func__, err);
            ret = err;
        } else {
            /* recompute QEMU-side FP status derived from the new FCSR */
            restore_fp_status(env);
        }

        /*
         * FPU register state is a subset of MSA vector state, so don't save FPU
         * registers if we're emulating a CPU with MSA.
         */
        if (!ase_msa_available(env)) {
            /* Floating point registers */
            for (i = 0; i < 32; ++i) {
                if (env->CP0_Status & (1 << CP0St_FR)) {
                    err = kvm_mips_get_one_ureg64(cs, KVM_REG_MIPS_FPR_64(i),
                                                  &env->active_fpu.fpr[i].d);
                } else {
                    err = kvm_mips_get_one_ureg(cs, KVM_REG_MIPS_FPR_32(i),
                                        &env->active_fpu.fpr[i].w[FP_ENDIAN_IDX]);
                }
                if (err < 0) {
                    DPRINTF("%s: Failed to get FPR%u (%d)\n", __func__, i, err);
                    ret = err;
                }
            }
        }
    }

    /* Only get MSA state if we're emulating a CPU with MSA */
    if (ase_msa_available(env)) {
        /* MSA Control Registers */
        err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_MSA_IR,
                                   &env->msair);
        if (err < 0) {
            DPRINTF("%s: Failed to get MSA_IR (%d)\n", __func__, err);
            ret = err;
        }
        err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_MSA_CSR,
                                   &env->active_tc.msacsr);
        if (err < 0) {
            DPRINTF("%s: Failed to get MSA_CSR (%d)\n", __func__, err);
            ret = err;
        } else {
            /* recompute QEMU-side MSA status derived from the new MSACSR */
            restore_msa_fp_status(env);
        }

        /* Vector registers (includes FP registers) */
        for (i = 0; i < 32; ++i) {
            /* Big endian MSA not supported by QEMU yet anyway */
            err = kvm_mips_get_one_reg64(cs, KVM_REG_MIPS_VEC_128(i),
                                         env->active_fpu.fpr[i].wr.d);
            if (err < 0) {
                DPRINTF("%s: Failed to get VEC%u (%d)\n", __func__, i, err);
                ret = err;
            }
        }
    }

    return ret;
}
745
746
/*
 * Write the guest CP0 register state from env into KVM.
 *
 * Registers are put one at a time; the first error encountered is
 * remembered in ret (later puts are still attempted) and returned.  The
 * Config registers are synced with kvm_mips_change_one_reg() so that only
 * the explicitly writable KVM_REG_MIPS_CP0_CONFIG*_MASK bits are changed.
 */
static int kvm_mips_put_cp0_registers(CPUState *cs, int level)
{
    CPUMIPSState *env = cpu_env(cs);
    int err, ret = 0;

    /* level is currently unused; kept for signature symmetry */
    (void)level;

    err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_INDEX, &env->CP0_Index);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_INDEX (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_RANDOM, &env->CP0_Random);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_RANDOM (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_CONTEXT,
                                 &env->CP0_Context);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_CONTEXT (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_USERLOCAL,
                                 &env->active_tc.CP0_UserLocal);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_USERLOCAL (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_PAGEMASK,
                               &env->CP0_PageMask);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_PAGEMASK (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_PAGEGRAIN,
                               &env->CP0_PageGrain);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_PAGEGRAIN (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_PWBASE,
                                 &env->CP0_PWBase);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_PWBASE (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_PWFIELD,
                                 &env->CP0_PWField);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_PWField (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_PWSIZE,
                                 &env->CP0_PWSize);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_PWSIZE (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_WIRED, &env->CP0_Wired);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_WIRED (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_PWCTL, &env->CP0_PWCtl);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_PWCTL (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_HWRENA, &env->CP0_HWREna);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_HWRENA (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_BADVADDR,
                                 &env->CP0_BadVAddr);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_BADVADDR (%d)\n", __func__, err);
        ret = err;
    }

    /* If VM clock stopped then state will be restored when it is restarted */
    if (runstate_is_running()) {
        /* restores CP0_Cause and CP0_Count, and resumes the KVM timer */
        err = kvm_mips_restore_count(cs);
        if (err < 0) {
            ret = err;
        }
    }

    err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_ENTRYHI,
                                 &env->CP0_EntryHi);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_ENTRYHI (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_COMPARE,
                               &env->CP0_Compare);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_COMPARE (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_STATUS, &env->CP0_Status);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_STATUS (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_EPC, &env->CP0_EPC);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_EPC (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_PRID, &env->CP0_PRid);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_PRID (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_EBASE, &env->CP0_EBase);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_EBASE (%d)\n", __func__, err);
        ret = err;
    }
    /* Config registers: change only the bits the masks declare writable */
    err = kvm_mips_change_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG,
                                  &env->CP0_Config0,
                                  KVM_REG_MIPS_CP0_CONFIG_MASK);
    if (err < 0) {
        DPRINTF("%s: Failed to change CP0_CONFIG (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_change_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG1,
                                  &env->CP0_Config1,
                                  KVM_REG_MIPS_CP0_CONFIG1_MASK);
    if (err < 0) {
        DPRINTF("%s: Failed to change CP0_CONFIG1 (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_change_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG2,
                                  &env->CP0_Config2,
                                  KVM_REG_MIPS_CP0_CONFIG2_MASK);
    if (err < 0) {
        DPRINTF("%s: Failed to change CP0_CONFIG2 (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_change_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG3,
                                  &env->CP0_Config3,
                                  KVM_REG_MIPS_CP0_CONFIG3_MASK);
    if (err < 0) {
        DPRINTF("%s: Failed to change CP0_CONFIG3 (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_change_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG4,
                                  &env->CP0_Config4,
                                  KVM_REG_MIPS_CP0_CONFIG4_MASK);
    if (err < 0) {
        DPRINTF("%s: Failed to change CP0_CONFIG4 (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_change_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG5,
                                  &env->CP0_Config5,
                                  KVM_REG_MIPS_CP0_CONFIG5_MASK);
    if (err < 0) {
        DPRINTF("%s: Failed to change CP0_CONFIG5 (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_change_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG6,
                                  &env->CP0_Config6,
                                  KVM_REG_MIPS_CP0_CONFIG6_MASK);
    if (err < 0) {
        DPRINTF("%s: Failed to change CP0_CONFIG6 (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_XCONTEXT,
                                 &env->CP0_XContext);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_XCONTEXT (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_ERROREPC,
                                 &env->CP0_ErrorEPC);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_ERROREPC (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_KSCRATCH1,
                                 &env->CP0_KScratch[0]);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_KSCRATCH1 (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_KSCRATCH2,
                                 &env->CP0_KScratch[1]);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_KSCRATCH2 (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_KSCRATCH3,
                                 &env->CP0_KScratch[2]);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_KSCRATCH3 (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_KSCRATCH4,
                                 &env->CP0_KScratch[3]);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_KSCRATCH4 (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_KSCRATCH5,
                                 &env->CP0_KScratch[4]);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_KSCRATCH5 (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_KSCRATCH6,
                                 &env->CP0_KScratch[5]);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_KSCRATCH6 (%d)\n", __func__, err);
        ret = err;
    }

    return ret;
}
968
kvm_mips_get_cp0_registers(CPUState * cs)969 static int kvm_mips_get_cp0_registers(CPUState *cs)
970 {
971 CPUMIPSState *env = cpu_env(cs);
972 int err, ret = 0;
973
974 err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_INDEX, &env->CP0_Index);
975 if (err < 0) {
976 DPRINTF("%s: Failed to get CP0_INDEX (%d)\n", __func__, err);
977 ret = err;
978 }
979 err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_RANDOM, &env->CP0_Random);
980 if (err < 0) {
981 DPRINTF("%s: Failed to get CP0_RANDOM (%d)\n", __func__, err);
982 ret = err;
983 }
984 err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_CONTEXT,
985 &env->CP0_Context);
986 if (err < 0) {
987 DPRINTF("%s: Failed to get CP0_CONTEXT (%d)\n", __func__, err);
988 ret = err;
989 }
990 err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_USERLOCAL,
991 &env->active_tc.CP0_UserLocal);
992 if (err < 0) {
993 DPRINTF("%s: Failed to get CP0_USERLOCAL (%d)\n", __func__, err);
994 ret = err;
995 }
996 err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_PAGEMASK,
997 &env->CP0_PageMask);
998 if (err < 0) {
999 DPRINTF("%s: Failed to get CP0_PAGEMASK (%d)\n", __func__, err);
1000 ret = err;
1001 }
1002 err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_PAGEGRAIN,
1003 &env->CP0_PageGrain);
1004 if (err < 0) {
1005 DPRINTF("%s: Failed to get CP0_PAGEGRAIN (%d)\n", __func__, err);
1006 ret = err;
1007 }
1008 err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_PWBASE,
1009 &env->CP0_PWBase);
1010 if (err < 0) {
1011 DPRINTF("%s: Failed to get CP0_PWBASE (%d)\n", __func__, err);
1012 ret = err;
1013 }
1014 err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_PWFIELD,
1015 &env->CP0_PWField);
1016 if (err < 0) {
1017 DPRINTF("%s: Failed to get CP0_PWFIELD (%d)\n", __func__, err);
1018 ret = err;
1019 }
1020 err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_PWSIZE,
1021 &env->CP0_PWSize);
1022 if (err < 0) {
1023 DPRINTF("%s: Failed to get CP0_PWSIZE (%d)\n", __func__, err);
1024 ret = err;
1025 }
1026 err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_WIRED, &env->CP0_Wired);
1027 if (err < 0) {
1028 DPRINTF("%s: Failed to get CP0_WIRED (%d)\n", __func__, err);
1029 ret = err;
1030 }
1031 err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_PWCTL, &env->CP0_PWCtl);
1032 if (err < 0) {
1033 DPRINTF("%s: Failed to get CP0_PWCtl (%d)\n", __func__, err);
1034 ret = err;
1035 }
1036 err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_HWRENA, &env->CP0_HWREna);
1037 if (err < 0) {
1038 DPRINTF("%s: Failed to get CP0_HWRENA (%d)\n", __func__, err);
1039 ret = err;
1040 }
1041 err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_BADVADDR,
1042 &env->CP0_BadVAddr);
1043 if (err < 0) {
1044 DPRINTF("%s: Failed to get CP0_BADVADDR (%d)\n", __func__, err);
1045 ret = err;
1046 }
1047 err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_ENTRYHI,
1048 &env->CP0_EntryHi);
1049 if (err < 0) {
1050 DPRINTF("%s: Failed to get CP0_ENTRYHI (%d)\n", __func__, err);
1051 ret = err;
1052 }
1053 err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_COMPARE,
1054 &env->CP0_Compare);
1055 if (err < 0) {
1056 DPRINTF("%s: Failed to get CP0_COMPARE (%d)\n", __func__, err);
1057 ret = err;
1058 }
1059 err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_STATUS, &env->CP0_Status);
1060 if (err < 0) {
1061 DPRINTF("%s: Failed to get CP0_STATUS (%d)\n", __func__, err);
1062 ret = err;
1063 }
1064
1065 /* If VM clock stopped then state was already saved when it was stopped */
1066 if (runstate_is_running()) {
1067 err = kvm_mips_save_count(cs);
1068 if (err < 0) {
1069 ret = err;
1070 }
1071 }
1072
1073 err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_EPC, &env->CP0_EPC);
1074 if (err < 0) {
1075 DPRINTF("%s: Failed to get CP0_EPC (%d)\n", __func__, err);
1076 ret = err;
1077 }
1078 err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_PRID, &env->CP0_PRid);
1079 if (err < 0) {
1080 DPRINTF("%s: Failed to get CP0_PRID (%d)\n", __func__, err);
1081 ret = err;
1082 }
1083 err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_EBASE, &env->CP0_EBase);
1084 if (err < 0) {
1085 DPRINTF("%s: Failed to get CP0_EBASE (%d)\n", __func__, err);
1086 ret = err;
1087 }
1088 err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG, &env->CP0_Config0);
1089 if (err < 0) {
1090 DPRINTF("%s: Failed to get CP0_CONFIG (%d)\n", __func__, err);
1091 ret = err;
1092 }
1093 err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG1, &env->CP0_Config1);
1094 if (err < 0) {
1095 DPRINTF("%s: Failed to get CP0_CONFIG1 (%d)\n", __func__, err);
1096 ret = err;
1097 }
1098 err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG2, &env->CP0_Config2);
1099 if (err < 0) {
1100 DPRINTF("%s: Failed to get CP0_CONFIG2 (%d)\n", __func__, err);
1101 ret = err;
1102 }
1103 err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG3, &env->CP0_Config3);
1104 if (err < 0) {
1105 DPRINTF("%s: Failed to get CP0_CONFIG3 (%d)\n", __func__, err);
1106 ret = err;
1107 }
1108 err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG4, &env->CP0_Config4);
1109 if (err < 0) {
1110 DPRINTF("%s: Failed to get CP0_CONFIG4 (%d)\n", __func__, err);
1111 ret = err;
1112 }
1113 err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG5, &env->CP0_Config5);
1114 if (err < 0) {
1115 DPRINTF("%s: Failed to get CP0_CONFIG5 (%d)\n", __func__, err);
1116 ret = err;
1117 }
1118 err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG6, &env->CP0_Config6);
1119 if (err < 0) {
1120 DPRINTF("%s: Failed to get CP0_CONFIG6 (%d)\n", __func__, err);
1121 ret = err;
1122 }
1123 err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_XCONTEXT,
1124 &env->CP0_XContext);
1125 if (err < 0) {
1126 DPRINTF("%s: Failed to get CP0_XCONTEXT (%d)\n", __func__, err);
1127 ret = err;
1128 }
1129 err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_ERROREPC,
1130 &env->CP0_ErrorEPC);
1131 if (err < 0) {
1132 DPRINTF("%s: Failed to get CP0_ERROREPC (%d)\n", __func__, err);
1133 ret = err;
1134 }
1135 err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_KSCRATCH1,
1136 &env->CP0_KScratch[0]);
1137 if (err < 0) {
1138 DPRINTF("%s: Failed to get CP0_KSCRATCH1 (%d)\n", __func__, err);
1139 ret = err;
1140 }
1141 err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_KSCRATCH2,
1142 &env->CP0_KScratch[1]);
1143 if (err < 0) {
1144 DPRINTF("%s: Failed to get CP0_KSCRATCH2 (%d)\n", __func__, err);
1145 ret = err;
1146 }
1147 err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_KSCRATCH3,
1148 &env->CP0_KScratch[2]);
1149 if (err < 0) {
1150 DPRINTF("%s: Failed to get CP0_KSCRATCH3 (%d)\n", __func__, err);
1151 ret = err;
1152 }
1153 err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_KSCRATCH4,
1154 &env->CP0_KScratch[3]);
1155 if (err < 0) {
1156 DPRINTF("%s: Failed to get CP0_KSCRATCH4 (%d)\n", __func__, err);
1157 ret = err;
1158 }
1159 err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_KSCRATCH5,
1160 &env->CP0_KScratch[4]);
1161 if (err < 0) {
1162 DPRINTF("%s: Failed to get CP0_KSCRATCH5 (%d)\n", __func__, err);
1163 ret = err;
1164 }
1165 err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_KSCRATCH6,
1166 &env->CP0_KScratch[5]);
1167 if (err < 0) {
1168 DPRINTF("%s: Failed to get CP0_KSCRATCH6 (%d)\n", __func__, err);
1169 ret = err;
1170 }
1171
1172 return ret;
1173 }
1174
/*
 * Push QEMU's view of the vCPU state into KVM.
 *
 * GPRs, HI/LO and PC are sign-extended through target_long to match the
 * 64-bit KVM_SET_REGS ABI, then the CP0 and FPU/MSA register sets are
 * written via their dedicated helpers.
 *
 * Returns 0 on success, or the negative errno of the first failing step.
 */
int kvm_arch_put_registers(CPUState *cs, int level)
{
    CPUMIPSState *env = cpu_env(cs);
    struct kvm_regs regs;
    int ret;
    int i;

    /* Set the registers based on QEMU's view of things */
    for (i = 0; i < 32; i++) {
        regs.gpr[i] = (int64_t)(target_long)env->active_tc.gpr[i];
    }

    regs.hi = (int64_t)(target_long)env->active_tc.HI[0];
    regs.lo = (int64_t)(target_long)env->active_tc.LO[0];
    regs.pc = (int64_t)(target_long)env->active_tc.PC;

    ret = kvm_vcpu_ioctl(cs, KVM_SET_REGS, &regs);
    if (ret < 0) {
        return ret;
    }

    ret = kvm_mips_put_cp0_registers(cs, level);
    if (ret < 0) {
        return ret;
    }

    ret = kvm_mips_put_fpu_registers(cs, level);
    if (ret < 0) {
        return ret;
    }

    return ret;
}
1209
/*
 * Refresh QEMU's vCPU state from KVM.
 *
 * Reads the general-purpose registers, HI/LO and PC with KVM_GET_REGS,
 * then pulls in the CP0 and FPU/MSA register sets via their helpers.
 *
 * Returns 0 on success, or the negative errno of the first failing step.
 */
int kvm_arch_get_registers(CPUState *cs)
{
    CPUMIPSState *env = cpu_env(cs);
    int ret = 0;
    struct kvm_regs regs;
    int i;

    /* Get the current register set as KVM sees it */
    ret = kvm_vcpu_ioctl(cs, KVM_GET_REGS, &regs);
    if (ret < 0) {
        return ret;
    }

    for (i = 0; i < 32; i++) {
        env->active_tc.gpr[i] = regs.gpr[i];
    }

    env->active_tc.HI[0] = regs.hi;
    env->active_tc.LO[0] = regs.lo;
    env->active_tc.PC = regs.pc;

    /* Propagate helper failures instead of silently discarding them */
    ret = kvm_mips_get_cp0_registers(cs);
    if (ret < 0) {
        return ret;
    }

    ret = kvm_mips_get_fpu_registers(cs);
    if (ret < 0) {
        return ret;
    }

    return ret;
}
1237
/*
 * No arch-specific fixup is needed for MSI routes on MIPS; accept the
 * route unchanged.
 */
int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route,
                             uint64_t address, uint32_t data, PCIDevice *dev)
{
    return 0;
}
1243
/*
 * No arch-specific post-processing after an MSI route is added on MIPS.
 */
int kvm_arch_add_msi_route_post(struct kvm_irq_routing_entry *route,
                                int vector, PCIDevice *dev)
{
    return 0;
}
1249
/*
 * Nothing to release per-virq on MIPS; always succeeds.
 */
int kvm_arch_release_virq_post(int virq)
{
    return 0;
}
1254
/*
 * MSI data-to-GSI translation is not implemented for MIPS; reaching
 * this function indicates a programming error, so abort.
 */
int kvm_arch_msi_data_to_gsi(uint32_t data)
{
    abort();
}
1259
/*
 * Pick the KVM VM type for MIPS.
 *
 * Only the VZ (hardware virtualization) VM type is supported.  Returns
 * KVM_VM_MIPS_VZ when the host kernel advertises KVM_CAP_MIPS_VZ,
 * otherwise reports an error and returns -1.
 */
int kvm_arch_get_default_type(MachineState *machine)
{
#if defined(KVM_CAP_MIPS_VZ)
    KVMState *s = KVM_STATE(machine->accelerator);

    if (kvm_check_extension(s, KVM_CAP_MIPS_VZ) > 0) {
        return KVM_VM_MIPS_VZ;
    }
#endif

    error_report("KVM_VM_MIPS_VZ type is not available");
    return -1;
}
1275
/*
 * No MIPS-specific properties to add to the KVM accelerator class.
 */
void kvm_arch_accel_class_init(ObjectClass *oc)
{
}
1279