/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS: MIPS specific KVM APIs
 *
 * Copyright (C) 2012-2014 Imagination Technologies Ltd.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */

#include "qemu/osdep.h"
#include <sys/ioctl.h>

#include <linux/kvm.h>

#include "qemu-common.h"
#include "cpu.h"
#include "internal.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "qemu/timer.h"
#include "sysemu/kvm.h"
#include "sysemu/kvm_int.h"
#include "sysemu/runstate.h"
#include "sysemu/cpus.h"
#include "kvm_mips.h"
#include "exec/memattrs.h"
#include "hw/boards.h"

#define DEBUG_KVM 0

#define DPRINTF(fmt, ...) \
    do { if (DEBUG_KVM) { fprintf(stderr, fmt, ## __VA_ARGS__); } } while (0)

static int kvm_mips_fpu_cap;
static int kvm_mips_msa_cap;

const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
    KVM_CAP_LAST_INFO
};

static void kvm_mips_update_state(void *opaque, int running, RunState state);

unsigned long kvm_arch_vcpu_id(CPUState *cs)
{
    return cs->cpu_index;
}

int kvm_arch_init(MachineState *ms, KVMState *s)
{
    /* MIPS has 128 signals */
    kvm_set_sigmask_len(s, 16);

    kvm_mips_fpu_cap = kvm_check_extension(s, KVM_CAP_MIPS_FPU);
    kvm_mips_msa_cap = kvm_check_extension(s, KVM_CAP_MIPS_MSA);

    DPRINTF("%s\n", __func__);
    return 0;
}

int kvm_arch_irqchip_create(KVMState *s)
{
    return 0;
}

int kvm_arch_init_vcpu(CPUState *cs)
{
    MIPSCPU *cpu = MIPS_CPU(cs);
    CPUMIPSState *env = &cpu->env;
    int ret = 0;

    qemu_add_vm_change_state_handler(kvm_mips_update_state, cs);

    if (kvm_mips_fpu_cap && env->CP0_Config1 & (1 << CP0C1_FP)) {
        ret = kvm_vcpu_enable_cap(cs, KVM_CAP_MIPS_FPU, 0, 0);
        if (ret < 0) {
            /* mark unsupported so it gets disabled on reset */
            kvm_mips_fpu_cap = 0;
            ret = 0;
        }
    }

    if (kvm_mips_msa_cap && env->CP0_Config3 & (1 << CP0C3_MSAP)) {
        ret = kvm_vcpu_enable_cap(cs, KVM_CAP_MIPS_MSA, 0, 0);
        if (ret < 0) {
            /* mark unsupported so it gets disabled on reset */
            kvm_mips_msa_cap = 0;
            ret = 0;
        }
    }

    DPRINTF("%s\n", __func__);
    return ret;
}

int kvm_arch_destroy_vcpu(CPUState *cs)
{
    return 0;
}

void kvm_mips_reset_vcpu(MIPSCPU *cpu)
{
    CPUMIPSState *env = &cpu->env;

    if (!kvm_mips_fpu_cap && env->CP0_Config1 & (1 << CP0C1_FP)) {
        warn_report("KVM does not support FPU, disabling");
        env->CP0_Config1 &= ~(1 << CP0C1_FP);
    }
    if (!kvm_mips_msa_cap && env->CP0_Config3 & (1 << CP0C3_MSAP)) {
        warn_report("KVM does not support MSA, disabling");
        env->CP0_Config3 &= ~(1 << CP0C3_MSAP);
    }

    DPRINTF("%s\n", __func__);
}

int kvm_arch_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
{
    DPRINTF("%s\n", __func__);
    return 0;
}

int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
{
    DPRINTF("%s\n", __func__);
    return 0;
}

static inline int cpu_mips_io_interrupts_pending(MIPSCPU *cpu)
{
    CPUMIPSState *env = &cpu->env;

    return env->CP0_Cause & (0x1 << (2 + CP0Ca_IP));
}


void kvm_arch_pre_run(CPUState *cs, struct kvm_run *run)
{
    MIPSCPU *cpu = MIPS_CPU(cs);
    int r;
    struct kvm_mips_interrupt intr;
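
    /*
     * Descriptive note: check for a pending I/O interrupt (IRQ 2) and, if
     * present, inject it into the vcpu with the KVM_INTERRUPT ioctl.  Both
     * the check and the injection are done under the iothread lock (BQL).
     */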
    qemu_mutex_lock_iothread();

    if ((cs->interrupt_request & CPU_INTERRUPT_HARD) &&
            cpu_mips_io_interrupts_pending(cpu)) {
        intr.cpu = -1;
        intr.irq = 2;
        r = kvm_vcpu_ioctl(cs, KVM_INTERRUPT, &intr);
        if (r < 0) {
            error_report("%s: cpu %d: failed to inject IRQ %x",
                         __func__, cs->cpu_index, intr.irq);
        }
    }

    qemu_mutex_unlock_iothread();
}

MemTxAttrs kvm_arch_post_run(CPUState *cs, struct kvm_run *run)
{
    return MEMTXATTRS_UNSPECIFIED;
}

int kvm_arch_process_async_events(CPUState *cs)
{
    return cs->halted;
}

int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
{
    int ret;

    DPRINTF("%s\n", __func__);
    switch (run->exit_reason) {
    default:
        error_report("%s: unknown exit reason %d",
                     __func__, run->exit_reason);
        ret = -1;
        break;
    }

    return ret;
}

bool kvm_arch_stop_on_emulation_error(CPUState *cs)
{
    DPRINTF("%s\n", __func__);
    return true;
}

void kvm_arch_init_irq_routing(KVMState *s)
{
}

int kvm_mips_set_interrupt(MIPSCPU *cpu, int irq, int level)
{
    CPUState *cs = CPU(cpu);
    struct kvm_mips_interrupt intr;

    if (!kvm_enabled()) {
        return 0;
    }

    intr.cpu = -1;

    if (level) {
        intr.irq = irq;
    } else {
        intr.irq = -irq;
    }

    kvm_vcpu_ioctl(cs, KVM_INTERRUPT, &intr);

    return 0;
}

int kvm_mips_set_ipi_interrupt(MIPSCPU *cpu, int irq, int level)
{
    CPUState *cs = current_cpu;
    CPUState *dest_cs = CPU(cpu);
    struct kvm_mips_interrupt intr;

    if (!kvm_enabled()) {
        return 0;
    }

    intr.cpu = dest_cs->cpu_index;

    if (level) {
        intr.irq = irq;
    } else {
        intr.irq = -irq;
    }

    DPRINTF("%s: CPU %d, IRQ: %d\n", __func__, intr.cpu, intr.irq);

    kvm_vcpu_ioctl(cs, KVM_INTERRUPT, &intr);

    return 0;
}
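
/*
 * Descriptive note: CP0 registers are transferred through the KVM one-reg
 * interface.  The register id encodes the access size and (reg * 8 + sel),
 * matching the CP0 register/select numbering used below.
 */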
#define MIPS_CP0_32(_R, _S) \
    (KVM_REG_MIPS_CP0 | KVM_REG_SIZE_U32 | (8 * (_R) + (_S)))

#define MIPS_CP0_64(_R, _S) \
    (KVM_REG_MIPS_CP0 | KVM_REG_SIZE_U64 | (8 * (_R) + (_S)))

#define KVM_REG_MIPS_CP0_INDEX          MIPS_CP0_32(0, 0)
#define KVM_REG_MIPS_CP0_RANDOM         MIPS_CP0_32(1, 0)
#define KVM_REG_MIPS_CP0_CONTEXT        MIPS_CP0_64(4, 0)
#define KVM_REG_MIPS_CP0_USERLOCAL      MIPS_CP0_64(4, 2)
#define KVM_REG_MIPS_CP0_PAGEMASK       MIPS_CP0_32(5, 0)
#define KVM_REG_MIPS_CP0_PAGEGRAIN      MIPS_CP0_32(5, 1)
#define KVM_REG_MIPS_CP0_PWBASE         MIPS_CP0_64(5, 5)
#define KVM_REG_MIPS_CP0_PWFIELD        MIPS_CP0_64(5, 6)
#define KVM_REG_MIPS_CP0_PWSIZE         MIPS_CP0_64(5, 7)
#define KVM_REG_MIPS_CP0_WIRED          MIPS_CP0_32(6, 0)
#define KVM_REG_MIPS_CP0_PWCTL          MIPS_CP0_32(6, 6)
#define KVM_REG_MIPS_CP0_HWRENA         MIPS_CP0_32(7, 0)
#define KVM_REG_MIPS_CP0_BADVADDR       MIPS_CP0_64(8, 0)
#define KVM_REG_MIPS_CP0_COUNT          MIPS_CP0_32(9, 0)
#define KVM_REG_MIPS_CP0_ENTRYHI        MIPS_CP0_64(10, 0)
#define KVM_REG_MIPS_CP0_COMPARE        MIPS_CP0_32(11, 0)
#define KVM_REG_MIPS_CP0_STATUS         MIPS_CP0_32(12, 0)
#define KVM_REG_MIPS_CP0_CAUSE          MIPS_CP0_32(13, 0)
#define KVM_REG_MIPS_CP0_EPC            MIPS_CP0_64(14, 0)
#define KVM_REG_MIPS_CP0_PRID           MIPS_CP0_32(15, 0)
#define KVM_REG_MIPS_CP0_EBASE          MIPS_CP0_64(15, 1)
#define KVM_REG_MIPS_CP0_CONFIG         MIPS_CP0_32(16, 0)
#define KVM_REG_MIPS_CP0_CONFIG1        MIPS_CP0_32(16, 1)
#define KVM_REG_MIPS_CP0_CONFIG2        MIPS_CP0_32(16, 2)
#define KVM_REG_MIPS_CP0_CONFIG3        MIPS_CP0_32(16, 3)
#define KVM_REG_MIPS_CP0_CONFIG4        MIPS_CP0_32(16, 4)
#define KVM_REG_MIPS_CP0_CONFIG5        MIPS_CP0_32(16, 5)
#define KVM_REG_MIPS_CP0_CONFIG6        MIPS_CP0_32(16, 6)
#define KVM_REG_MIPS_CP0_XCONTEXT       MIPS_CP0_64(20, 0)
#define KVM_REG_MIPS_CP0_ERROREPC       MIPS_CP0_64(30, 0)
#define KVM_REG_MIPS_CP0_KSCRATCH1      MIPS_CP0_64(31, 2)
#define KVM_REG_MIPS_CP0_KSCRATCH2      MIPS_CP0_64(31, 3)
#define KVM_REG_MIPS_CP0_KSCRATCH3      MIPS_CP0_64(31, 4)
#define KVM_REG_MIPS_CP0_KSCRATCH4      MIPS_CP0_64(31, 5)
#define KVM_REG_MIPS_CP0_KSCRATCH5      MIPS_CP0_64(31, 6)
#define KVM_REG_MIPS_CP0_KSCRATCH6      MIPS_CP0_64(31, 7)

static inline int kvm_mips_put_one_reg(CPUState *cs, uint64_t reg_id,
                                       int32_t *addr)
{
    struct kvm_one_reg cp0reg = {
        .id = reg_id,
        .addr = (uintptr_t)addr
    };

    return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &cp0reg);
}

static inline int kvm_mips_put_one_ureg(CPUState *cs, uint64_t reg_id,
                                        uint32_t *addr)
{
    struct kvm_one_reg cp0reg = {
        .id = reg_id,
        .addr = (uintptr_t)addr
    };

    return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &cp0reg);
}

static inline int kvm_mips_put_one_ulreg(CPUState *cs, uint64_t reg_id,
                                         target_ulong *addr)
{
    uint64_t val64 = *addr;
    struct kvm_one_reg cp0reg = {
        .id = reg_id,
        .addr = (uintptr_t)&val64
    };

    return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &cp0reg);
}

static inline int kvm_mips_put_one_reg64(CPUState *cs, uint64_t reg_id,
                                         int64_t *addr)
{
    struct kvm_one_reg cp0reg = {
        .id = reg_id,
        .addr = (uintptr_t)addr
    };

    return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &cp0reg);
}

static inline int kvm_mips_put_one_ureg64(CPUState *cs, uint64_t reg_id,
                                          uint64_t *addr)
{
    struct kvm_one_reg cp0reg = {
        .id = reg_id,
        .addr = (uintptr_t)addr
    };

    return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &cp0reg);
}

static inline int kvm_mips_get_one_reg(CPUState *cs, uint64_t reg_id,
                                       int32_t *addr)
{
    struct kvm_one_reg cp0reg = {
        .id = reg_id,
        .addr = (uintptr_t)addr
    };

    return kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &cp0reg);
}

static inline int kvm_mips_get_one_ureg(CPUState *cs, uint64_t reg_id,
                                        uint32_t *addr)
{
    struct kvm_one_reg cp0reg = {
        .id = reg_id,
        .addr = (uintptr_t)addr
    };

    return kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &cp0reg);
}

static inline int kvm_mips_get_one_ulreg(CPUState *cs, uint64_t reg_id,
                                         target_ulong *addr)
{
    int ret;
    uint64_t val64 = 0;
    struct kvm_one_reg cp0reg = {
        .id = reg_id,
        .addr = (uintptr_t)&val64
    };

    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &cp0reg);
    if (ret >= 0) {
        *addr = val64;
    }
    return ret;
}

static inline int kvm_mips_get_one_reg64(CPUState *cs, uint64_t reg_id,
                                         int64_t *addr)
{
    struct kvm_one_reg cp0reg = {
        .id = reg_id,
        .addr = (uintptr_t)addr
    };

    return kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &cp0reg);
}

static inline int kvm_mips_get_one_ureg64(CPUState *cs, uint64_t reg_id,
                                          uint64_t *addr)
{
    struct kvm_one_reg cp0reg = {
        .id = reg_id,
        .addr = (uintptr_t)addr
    };

    return kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &cp0reg);
}
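
/*
 * Descriptive note: these masks list the bits of the Config registers that
 * QEMU is allowed to modify in KVM.  kvm_mips_change_one_reg() below only
 * updates bits covered by the given mask.
 */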
#define KVM_REG_MIPS_CP0_CONFIG_MASK    (1U << CP0C0_M)
#define KVM_REG_MIPS_CP0_CONFIG1_MASK   ((1U << CP0C1_M) | \
                                         (1U << CP0C1_FP))
#define KVM_REG_MIPS_CP0_CONFIG2_MASK   (1U << CP0C2_M)
#define KVM_REG_MIPS_CP0_CONFIG3_MASK   ((1U << CP0C3_M) | \
                                         (1U << CP0C3_MSAP))
#define KVM_REG_MIPS_CP0_CONFIG4_MASK   (1U << CP0C4_M)
#define KVM_REG_MIPS_CP0_CONFIG5_MASK   ((1U << CP0C5_MSAEn) | \
                                         (1U << CP0C5_UFE) | \
                                         (1U << CP0C5_FRE) | \
                                         (1U << CP0C5_UFR))
#define KVM_REG_MIPS_CP0_CONFIG6_MASK   ((1U << CP0C6_BPPASS) | \
                                         (0x3fU << CP0C6_KPOS) | \
                                         (1U << CP0C6_KE) | \
                                         (1U << CP0C6_VTLBONLY) | \
                                         (1U << CP0C6_LASX) | \
                                         (1U << CP0C6_SSEN) | \
                                         (1U << CP0C6_DISDRTIME) | \
                                         (1U << CP0C6_PIXNUEN) | \
                                         (1U << CP0C6_SCRAND) | \
                                         (1U << CP0C6_LLEXCEN) | \
                                         (1U << CP0C6_DISVC) | \
                                         (1U << CP0C6_VCLRU) | \
                                         (1U << CP0C6_DCLRU) | \
                                         (1U << CP0C6_PIXUEN) | \
                                         (1U << CP0C6_DISBLKLYEN) | \
                                         (1U << CP0C6_UMEMUALEN) | \
                                         (1U << CP0C6_SFBEN) | \
                                         (1U << CP0C6_FLTINT) | \
                                         (1U << CP0C6_VLTINT) | \
                                         (1U << CP0C6_DISBTB) | \
                                         (3U << CP0C6_STPREFCTL) | \
                                         (1U << CP0C6_INSTPREF) | \
                                         (1U << CP0C6_DATAPREF))

static inline int kvm_mips_change_one_reg(CPUState *cs, uint64_t reg_id,
                                          int32_t *addr, int32_t mask)
{
    int err;
    int32_t tmp, change;

    err = kvm_mips_get_one_reg(cs, reg_id, &tmp);
    if (err < 0) {
        return err;
    }

    /* only change bits in mask */
    change = (*addr ^ tmp) & mask;
    if (!change) {
        return 0;
    }

    tmp = tmp ^ change;
    return kvm_mips_put_one_reg(cs, reg_id, &tmp);
}

/*
 * We freeze the KVM timer when either the VM clock is stopped or the state is
 * saved (the state is dirty).
 */

/*
 * Save the state of the KVM timer when VM clock is stopped or state is synced
 * to QEMU.
 */
static int kvm_mips_save_count(CPUState *cs)
{
    MIPSCPU *cpu = MIPS_CPU(cs);
    CPUMIPSState *env = &cpu->env;
    uint64_t count_ctl;
    int err, ret = 0;

    /* freeze KVM timer */
    err = kvm_mips_get_one_ureg64(cs, KVM_REG_MIPS_COUNT_CTL, &count_ctl);
    if (err < 0) {
        DPRINTF("%s: Failed to get COUNT_CTL (%d)\n", __func__, err);
        ret = err;
    } else if (!(count_ctl & KVM_REG_MIPS_COUNT_CTL_DC)) {
        count_ctl |= KVM_REG_MIPS_COUNT_CTL_DC;
        err = kvm_mips_put_one_ureg64(cs, KVM_REG_MIPS_COUNT_CTL, &count_ctl);
        if (err < 0) {
            DPRINTF("%s: Failed to set COUNT_CTL.DC=1 (%d)\n", __func__, err);
            ret = err;
        }
    }

    /* read CP0_Cause */
    err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_CAUSE, &env->CP0_Cause);
    if (err < 0) {
        DPRINTF("%s: Failed to get CP0_CAUSE (%d)\n", __func__, err);
        ret = err;
    }

    /* read CP0_Count */
    err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_COUNT, &env->CP0_Count);
    if (err < 0) {
        DPRINTF("%s: Failed to get CP0_COUNT (%d)\n", __func__, err);
        ret = err;
    }

    return ret;
}

/*
 * Restore the state of the KVM timer when VM clock is restarted or state is
 * synced to KVM.
 */
static int kvm_mips_restore_count(CPUState *cs)
{
    MIPSCPU *cpu = MIPS_CPU(cs);
    CPUMIPSState *env = &cpu->env;
    uint64_t count_ctl;
    int err_dc, err, ret = 0;

    /* check the timer is frozen */
    err_dc = kvm_mips_get_one_ureg64(cs, KVM_REG_MIPS_COUNT_CTL, &count_ctl);
    if (err_dc < 0) {
        DPRINTF("%s: Failed to get COUNT_CTL (%d)\n", __func__, err_dc);
        ret = err_dc;
    } else if (!(count_ctl & KVM_REG_MIPS_COUNT_CTL_DC)) {
        /* freeze timer (sets COUNT_RESUME for us) */
        count_ctl |= KVM_REG_MIPS_COUNT_CTL_DC;
        err = kvm_mips_put_one_ureg64(cs, KVM_REG_MIPS_COUNT_CTL, &count_ctl);
        if (err < 0) {
            DPRINTF("%s: Failed to set COUNT_CTL.DC=1 (%d)\n", __func__, err);
            ret = err;
        }
    }

    /* load CP0_Cause */
    err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_CAUSE, &env->CP0_Cause);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_CAUSE (%d)\n", __func__, err);
        ret = err;
    }

    /* load CP0_Count */
    err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_COUNT, &env->CP0_Count);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_COUNT (%d)\n", __func__, err);
        ret = err;
    }

    /* resume KVM timer */
    if (err_dc >= 0) {
        count_ctl &= ~KVM_REG_MIPS_COUNT_CTL_DC;
        err = kvm_mips_put_one_ureg64(cs, KVM_REG_MIPS_COUNT_CTL, &count_ctl);
        if (err < 0) {
            DPRINTF("%s: Failed to set COUNT_CTL.DC=0 (%d)\n", __func__, err);
            ret = err;
        }
    }

    return ret;
}

/*
 * Handle the VM clock being started or stopped
 */
static void kvm_mips_update_state(void *opaque, int running, RunState state)
{
    CPUState *cs = opaque;
    int ret;
    uint64_t count_resume;

    /*
     * If state is already dirty (synced to QEMU) then the KVM timer state is
     * already saved and can be restored when it is synced back to KVM.
     */
    if (!running) {
        if (!cs->vcpu_dirty) {
            ret = kvm_mips_save_count(cs);
            if (ret < 0) {
                warn_report("Failed saving count");
            }
        }
    } else {
        /* Set clock restore time to now */
        count_resume = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
        ret = kvm_mips_put_one_ureg64(cs, KVM_REG_MIPS_COUNT_RESUME,
                                      &count_resume);
        if (ret < 0) {
            warn_report("Failed setting COUNT_RESUME");
            return;
        }

        if (!cs->vcpu_dirty) {
            ret = kvm_mips_restore_count(cs);
            if (ret < 0) {
                warn_report("Failed restoring count");
            }
        }
    }
}
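
/*
 * Descriptive note: copy QEMU's FPU/MSA state into KVM.  The FCR_IR and
 * MSA_IR control registers are only written for KVM_PUT_FULL_STATE; the data
 * registers are written either as 64/32-bit FPRs or as 128-bit vector
 * registers, depending on whether MSA is present.
 */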
static int kvm_mips_put_fpu_registers(CPUState *cs, int level)
{
    MIPSCPU *cpu = MIPS_CPU(cs);
    CPUMIPSState *env = &cpu->env;
    int err, ret = 0;
    unsigned int i;

    /* Only put FPU state if we're emulating a CPU with an FPU */
    if (env->CP0_Config1 & (1 << CP0C1_FP)) {
        /* FPU Control Registers */
        if (level == KVM_PUT_FULL_STATE) {
            err = kvm_mips_put_one_ureg(cs, KVM_REG_MIPS_FCR_IR,
                                        &env->active_fpu.fcr0);
            if (err < 0) {
                DPRINTF("%s: Failed to put FCR_IR (%d)\n", __func__, err);
                ret = err;
            }
        }
        err = kvm_mips_put_one_ureg(cs, KVM_REG_MIPS_FCR_CSR,
                                    &env->active_fpu.fcr31);
        if (err < 0) {
            DPRINTF("%s: Failed to put FCR_CSR (%d)\n", __func__, err);
            ret = err;
        }

        /*
         * FPU register state is a subset of MSA vector state, so don't put
         * FPU registers if we're emulating a CPU with MSA.
         */
        if (!(env->CP0_Config3 & (1 << CP0C3_MSAP))) {
            /* Floating point registers */
            for (i = 0; i < 32; ++i) {
                if (env->CP0_Status & (1 << CP0St_FR)) {
                    err = kvm_mips_put_one_ureg64(cs, KVM_REG_MIPS_FPR_64(i),
                                                  &env->active_fpu.fpr[i].d);
                } else {
                    err = kvm_mips_put_one_ureg(cs, KVM_REG_MIPS_FPR_32(i),
                                    &env->active_fpu.fpr[i].w[FP_ENDIAN_IDX]);
                }
                if (err < 0) {
                    DPRINTF("%s: Failed to put FPR%u (%d)\n", __func__, i, err);
                    ret = err;
                }
            }
        }
    }

    /* Only put MSA state if we're emulating a CPU with MSA */
    if (env->CP0_Config3 & (1 << CP0C3_MSAP)) {
        /* MSA Control Registers */
        if (level == KVM_PUT_FULL_STATE) {
            err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_MSA_IR,
                                       &env->msair);
            if (err < 0) {
                DPRINTF("%s: Failed to put MSA_IR (%d)\n", __func__, err);
                ret = err;
            }
        }
        err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_MSA_CSR,
                                   &env->active_tc.msacsr);
        if (err < 0) {
            DPRINTF("%s: Failed to put MSA_CSR (%d)\n", __func__, err);
            ret = err;
        }

        /* Vector registers (includes FP registers) */
        for (i = 0; i < 32; ++i) {
            /* Big endian MSA not supported by QEMU yet anyway */
            err = kvm_mips_put_one_reg64(cs, KVM_REG_MIPS_VEC_128(i),
                                         env->active_fpu.fpr[i].wr.d);
            if (err < 0) {
                DPRINTF("%s: Failed to put VEC%u (%d)\n", __func__, i, err);
                ret = err;
            }
        }
    }

    return ret;
}
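
/*
 * Descriptive note: read the FPU/MSA state back from KVM into the QEMU CPU
 * model, mirroring kvm_mips_put_fpu_registers() above.
 */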
static int kvm_mips_get_fpu_registers(CPUState *cs)
{
    MIPSCPU *cpu = MIPS_CPU(cs);
    CPUMIPSState *env = &cpu->env;
    int err, ret = 0;
    unsigned int i;

    /* Only get FPU state if we're emulating a CPU with an FPU */
    if (env->CP0_Config1 & (1 << CP0C1_FP)) {
        /* FPU Control Registers */
        err = kvm_mips_get_one_ureg(cs, KVM_REG_MIPS_FCR_IR,
                                    &env->active_fpu.fcr0);
        if (err < 0) {
            DPRINTF("%s: Failed to get FCR_IR (%d)\n", __func__, err);
            ret = err;
        }
        err = kvm_mips_get_one_ureg(cs, KVM_REG_MIPS_FCR_CSR,
                                    &env->active_fpu.fcr31);
        if (err < 0) {
            DPRINTF("%s: Failed to get FCR_CSR (%d)\n", __func__, err);
            ret = err;
        } else {
            restore_fp_status(env);
        }

        /*
         * FPU register state is a subset of MSA vector state, so don't save
         * FPU registers if we're emulating a CPU with MSA.
         */
        if (!(env->CP0_Config3 & (1 << CP0C3_MSAP))) {
            /* Floating point registers */
            for (i = 0; i < 32; ++i) {
                if (env->CP0_Status & (1 << CP0St_FR)) {
                    err = kvm_mips_get_one_ureg64(cs, KVM_REG_MIPS_FPR_64(i),
                                                  &env->active_fpu.fpr[i].d);
                } else {
                    err = kvm_mips_get_one_ureg(cs, KVM_REG_MIPS_FPR_32(i),
                                    &env->active_fpu.fpr[i].w[FP_ENDIAN_IDX]);
                }
                if (err < 0) {
                    DPRINTF("%s: Failed to get FPR%u (%d)\n", __func__, i, err);
                    ret = err;
                }
            }
        }
    }

    /* Only get MSA state if we're emulating a CPU with MSA */
    if (env->CP0_Config3 & (1 << CP0C3_MSAP)) {
        /* MSA Control Registers */
        err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_MSA_IR,
                                   &env->msair);
        if (err < 0) {
            DPRINTF("%s: Failed to get MSA_IR (%d)\n", __func__, err);
            ret = err;
        }
        err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_MSA_CSR,
                                   &env->active_tc.msacsr);
        if (err < 0) {
            DPRINTF("%s: Failed to get MSA_CSR (%d)\n", __func__, err);
            ret = err;
        } else {
            restore_msa_fp_status(env);
        }

        /* Vector registers (includes FP registers) */
        for (i = 0; i < 32; ++i) {
            /* Big endian MSA not supported by QEMU yet anyway */
            err = kvm_mips_get_one_reg64(cs, KVM_REG_MIPS_VEC_128(i),
                                         env->active_fpu.fpr[i].wr.d);
            if (err < 0) {
                DPRINTF("%s: Failed to get VEC%u (%d)\n", __func__, i, err);
                ret = err;
            }
        }
    }

    return ret;
}
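
/*
 * Descriptive note: write the CP0 state from the QEMU CPU model into KVM.
 * Failures on individual registers are logged and remembered in ret but do
 * not stop the remaining transfers.  The Config registers are only modified
 * within the writable bits defined by the masks above, and the timer state
 * is restored via kvm_mips_restore_count() while the VM clock is running.
 */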
static int kvm_mips_put_cp0_registers(CPUState *cs, int level)
{
    MIPSCPU *cpu = MIPS_CPU(cs);
    CPUMIPSState *env = &cpu->env;
    int err, ret = 0;

    (void)level;

    err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_INDEX, &env->CP0_Index);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_INDEX (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_RANDOM, &env->CP0_Random);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_RANDOM (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_CONTEXT,
                                 &env->CP0_Context);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_CONTEXT (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_USERLOCAL,
                                 &env->active_tc.CP0_UserLocal);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_USERLOCAL (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_PAGEMASK,
                               &env->CP0_PageMask);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_PAGEMASK (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_PAGEGRAIN,
                               &env->CP0_PageGrain);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_PAGEGRAIN (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_PWBASE,
                                 &env->CP0_PWBase);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_PWBASE (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_PWFIELD,
                                 &env->CP0_PWField);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_PWField (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_PWSIZE,
                                 &env->CP0_PWSize);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_PWSIZE (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_WIRED, &env->CP0_Wired);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_WIRED (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_PWCTL, &env->CP0_PWCtl);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_PWCTL (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_HWRENA, &env->CP0_HWREna);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_HWRENA (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_BADVADDR,
                                 &env->CP0_BadVAddr);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_BADVADDR (%d)\n", __func__, err);
        ret = err;
    }

    /* If VM clock stopped then state will be restored when it is restarted */
    if (runstate_is_running()) {
        err = kvm_mips_restore_count(cs);
        if (err < 0) {
            ret = err;
        }
    }

    err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_ENTRYHI,
                                 &env->CP0_EntryHi);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_ENTRYHI (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_COMPARE,
                               &env->CP0_Compare);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_COMPARE (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_STATUS, &env->CP0_Status);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_STATUS (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_EPC, &env->CP0_EPC);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_EPC (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_PRID, &env->CP0_PRid);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_PRID (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_EBASE, &env->CP0_EBase);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_EBASE (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_change_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG,
                                  &env->CP0_Config0,
                                  KVM_REG_MIPS_CP0_CONFIG_MASK);
    if (err < 0) {
        DPRINTF("%s: Failed to change CP0_CONFIG (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_change_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG1,
                                  &env->CP0_Config1,
                                  KVM_REG_MIPS_CP0_CONFIG1_MASK);
    if (err < 0) {
        DPRINTF("%s: Failed to change CP0_CONFIG1 (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_change_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG2,
                                  &env->CP0_Config2,
                                  KVM_REG_MIPS_CP0_CONFIG2_MASK);
    if (err < 0) {
        DPRINTF("%s: Failed to change CP0_CONFIG2 (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_change_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG3,
                                  &env->CP0_Config3,
                                  KVM_REG_MIPS_CP0_CONFIG3_MASK);
    if (err < 0) {
        DPRINTF("%s: Failed to change CP0_CONFIG3 (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_change_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG4,
                                  &env->CP0_Config4,
                                  KVM_REG_MIPS_CP0_CONFIG4_MASK);
    if (err < 0) {
        DPRINTF("%s: Failed to change CP0_CONFIG4 (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_change_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG5,
                                  &env->CP0_Config5,
                                  KVM_REG_MIPS_CP0_CONFIG5_MASK);
    if (err < 0) {
        DPRINTF("%s: Failed to change CP0_CONFIG5 (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_change_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG6,
                                  &env->CP0_Config6,
                                  KVM_REG_MIPS_CP0_CONFIG6_MASK);
    if (err < 0) {
        DPRINTF("%s: Failed to change CP0_CONFIG6 (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_XCONTEXT,
                                 &env->CP0_XContext);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_XCONTEXT (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_ERROREPC,
                                 &env->CP0_ErrorEPC);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_ERROREPC (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_KSCRATCH1,
                                 &env->CP0_KScratch[0]);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_KSCRATCH1 (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_KSCRATCH2,
                                 &env->CP0_KScratch[1]);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_KSCRATCH2 (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_KSCRATCH3,
                                 &env->CP0_KScratch[2]);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_KSCRATCH3 (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_KSCRATCH4,
                                 &env->CP0_KScratch[3]);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_KSCRATCH4 (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_KSCRATCH5,
                                 &env->CP0_KScratch[4]);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_KSCRATCH5 (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_KSCRATCH6,
                                 &env->CP0_KScratch[5]);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_KSCRATCH6 (%d)\n", __func__, err);
        ret = err;
    }

    return ret;
}
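
/*
 * Descriptive note: read the CP0 state held by KVM back into the QEMU CPU
 * model.  The timer state (Count/Cause) is saved via kvm_mips_save_count()
 * while the VM clock is running.
 */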
static int kvm_mips_get_cp0_registers(CPUState *cs)
{
    MIPSCPU *cpu = MIPS_CPU(cs);
    CPUMIPSState *env = &cpu->env;
    int err, ret = 0;

    err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_INDEX, &env->CP0_Index);
    if (err < 0) {
        DPRINTF("%s: Failed to get CP0_INDEX (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_RANDOM, &env->CP0_Random);
    if (err < 0) {
        DPRINTF("%s: Failed to get CP0_RANDOM (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_CONTEXT,
                                 &env->CP0_Context);
    if (err < 0) {
        DPRINTF("%s: Failed to get CP0_CONTEXT (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_USERLOCAL,
                                 &env->active_tc.CP0_UserLocal);
    if (err < 0) {
        DPRINTF("%s: Failed to get CP0_USERLOCAL (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_PAGEMASK,
                               &env->CP0_PageMask);
    if (err < 0) {
        DPRINTF("%s: Failed to get CP0_PAGEMASK (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_PAGEGRAIN,
                               &env->CP0_PageGrain);
    if (err < 0) {
        DPRINTF("%s: Failed to get CP0_PAGEGRAIN (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_PWBASE,
                                 &env->CP0_PWBase);
    if (err < 0) {
        DPRINTF("%s: Failed to get CP0_PWBASE (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_PWFIELD,
                                 &env->CP0_PWField);
    if (err < 0) {
        DPRINTF("%s: Failed to get CP0_PWFIELD (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_PWSIZE,
                                 &env->CP0_PWSize);
    if (err < 0) {
        DPRINTF("%s: Failed to get CP0_PWSIZE (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_WIRED, &env->CP0_Wired);
    if (err < 0) {
        DPRINTF("%s: Failed to get CP0_WIRED (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_PWCTL, &env->CP0_PWCtl);
    if (err < 0) {
        DPRINTF("%s: Failed to get CP0_PWCtl (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_HWRENA, &env->CP0_HWREna);
    if (err < 0) {
        DPRINTF("%s: Failed to get CP0_HWRENA (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_BADVADDR,
                                 &env->CP0_BadVAddr);
    if (err < 0) {
        DPRINTF("%s: Failed to get CP0_BADVADDR (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_ENTRYHI,
                                 &env->CP0_EntryHi);
    if (err < 0) {
        DPRINTF("%s: Failed to get CP0_ENTRYHI (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_COMPARE,
                               &env->CP0_Compare);
    if (err < 0) {
        DPRINTF("%s: Failed to get CP0_COMPARE (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_STATUS, &env->CP0_Status);
    if (err < 0) {
        DPRINTF("%s: Failed to get CP0_STATUS (%d)\n", __func__, err);
        ret = err;
    }

    /* If VM clock stopped then state was already saved when it was stopped */
    if (runstate_is_running()) {
        err = kvm_mips_save_count(cs);
        if (err < 0) {
            ret = err;
        }
    }

    err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_EPC, &env->CP0_EPC);
    if (err < 0) {
        DPRINTF("%s: Failed to get CP0_EPC (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_PRID, &env->CP0_PRid);
    if (err < 0) {
        DPRINTF("%s: Failed to get CP0_PRID (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_EBASE, &env->CP0_EBase);
    if (err < 0) {
        DPRINTF("%s: Failed to get CP0_EBASE (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG, &env->CP0_Config0);
    if (err < 0) {
        DPRINTF("%s: Failed to get CP0_CONFIG (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG1, &env->CP0_Config1);
    if (err < 0) {
        DPRINTF("%s: Failed to get CP0_CONFIG1 (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG2, &env->CP0_Config2);
    if (err < 0) {
        DPRINTF("%s: Failed to get CP0_CONFIG2 (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG3, &env->CP0_Config3);
    if (err < 0) {
        DPRINTF("%s: Failed to get CP0_CONFIG3 (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG4, &env->CP0_Config4);
    if (err < 0) {
        DPRINTF("%s: Failed to get CP0_CONFIG4 (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG5, &env->CP0_Config5);
    if (err < 0) {
        DPRINTF("%s: Failed to get CP0_CONFIG5 (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG6, &env->CP0_Config6);
    if (err < 0) {
        DPRINTF("%s: Failed to get CP0_CONFIG6 (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_XCONTEXT,
                                 &env->CP0_XContext);
    if (err < 0) {
        DPRINTF("%s: Failed to get CP0_XCONTEXT (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_ERROREPC,
                                 &env->CP0_ErrorEPC);
    if (err < 0) {
        DPRINTF("%s: Failed to get CP0_ERROREPC (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_KSCRATCH1,
                                 &env->CP0_KScratch[0]);
    if (err < 0) {
        DPRINTF("%s: Failed to get CP0_KSCRATCH1 (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_KSCRATCH2,
                                 &env->CP0_KScratch[1]);
    if (err < 0) {
        DPRINTF("%s: Failed to get CP0_KSCRATCH2 (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_KSCRATCH3,
                                 &env->CP0_KScratch[2]);
    if (err < 0) {
        DPRINTF("%s: Failed to get CP0_KSCRATCH3 (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_KSCRATCH4,
                                 &env->CP0_KScratch[3]);
    if (err < 0) {
        DPRINTF("%s: Failed to get CP0_KSCRATCH4 (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_KSCRATCH5,
                                 &env->CP0_KScratch[4]);
    if (err < 0) {
        DPRINTF("%s: Failed to get CP0_KSCRATCH5 (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_KSCRATCH6,
                                 &env->CP0_KScratch[5]);
    if (err < 0) {
        DPRINTF("%s: Failed to get CP0_KSCRATCH6 (%d)\n", __func__, err);
        ret = err;
    }

    return ret;
}

int kvm_arch_put_registers(CPUState *cs, int level)
{
    MIPSCPU *cpu = MIPS_CPU(cs);
    CPUMIPSState *env = &cpu->env;
    struct kvm_regs regs;
    int ret;
    int i;

    /* Set the registers based on QEMU's view of things */
    for (i = 0; i < 32; i++) {
        regs.gpr[i] = (int64_t)(target_long)env->active_tc.gpr[i];
    }

    regs.hi = (int64_t)(target_long)env->active_tc.HI[0];
    regs.lo = (int64_t)(target_long)env->active_tc.LO[0];
    regs.pc = (int64_t)(target_long)env->active_tc.PC;

    ret = kvm_vcpu_ioctl(cs, KVM_SET_REGS, &regs);

    if (ret < 0) {
        return ret;
    }

    ret = kvm_mips_put_cp0_registers(cs, level);
    if (ret < 0) {
        return ret;
    }

    ret = kvm_mips_put_fpu_registers(cs, level);
    if (ret < 0) {
        return ret;
    }

    return ret;
}

int kvm_arch_get_registers(CPUState *cs)
{
    MIPSCPU *cpu = MIPS_CPU(cs);
    CPUMIPSState *env = &cpu->env;
    int ret = 0;
    struct kvm_regs regs;
    int i;

    /* Get the current register set as KVM sees it */
    ret = kvm_vcpu_ioctl(cs, KVM_GET_REGS, &regs);

    if (ret < 0) {
        return ret;
    }

    for (i = 0; i < 32; i++) {
        env->active_tc.gpr[i] = regs.gpr[i];
    }

    env->active_tc.HI[0] = regs.hi;
    env->active_tc.LO[0] = regs.lo;
    env->active_tc.PC = regs.pc;

    kvm_mips_get_cp0_registers(cs);
    kvm_mips_get_fpu_registers(cs);

    return ret;
}
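
/*
 * Descriptive note: MSI routing is not wired up on MIPS, so the hooks below
 * are left as no-op stubs; kvm_arch_msi_data_to_gsi() aborts if it is ever
 * reached.
 */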
int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route,
                             uint64_t address, uint32_t data, PCIDevice *dev)
{
    return 0;
}

int kvm_arch_add_msi_route_post(struct kvm_irq_routing_entry *route,
                                int vector, PCIDevice *dev)
{
    return 0;
}

int kvm_arch_release_virq_post(int virq)
{
    return 0;
}

int kvm_arch_msi_data_to_gsi(uint32_t data)
{
    abort();
}

int mips_kvm_type(MachineState *machine, const char *vm_type)
{
#if defined(KVM_CAP_MIPS_VZ) || defined(KVM_CAP_MIPS_TE)
    int r;
    KVMState *s = KVM_STATE(machine->accelerator);
#endif

#if defined(KVM_CAP_MIPS_VZ)
    r = kvm_check_extension(s, KVM_CAP_MIPS_VZ);
    if (r > 0) {
        return KVM_VM_MIPS_VZ;
    }
#endif

#if defined(KVM_CAP_MIPS_TE)
    r = kvm_check_extension(s, KVM_CAP_MIPS_TE);
    if (r > 0) {
        return KVM_VM_MIPS_TE;
    }
#endif

    return -1;
}