/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS: MIPS specific KVM APIs
 *
 * Copyright (C) 2012-2014 Imagination Technologies Ltd.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */

#include "qemu/osdep.h"
#include <sys/ioctl.h>

#include <linux/kvm.h>

#include "qemu-common.h"
#include "cpu.h"
#include "internal.h"
#include "qemu/error-report.h"
#include "qemu/timer.h"
#include "sysemu/sysemu.h"
#include "sysemu/kvm.h"
#include "sysemu/cpus.h"
#include "kvm_mips.h"
#include "exec/memattrs.h"

#define DEBUG_KVM 0

#define DPRINTF(fmt, ...) \
    do { if (DEBUG_KVM) { fprintf(stderr, fmt, ## __VA_ARGS__); } } while (0)

static int kvm_mips_fpu_cap;
static int kvm_mips_msa_cap;

const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
    KVM_CAP_LAST_INFO
};

static void kvm_mips_update_state(void *opaque, int running, RunState state);

unsigned long kvm_arch_vcpu_id(CPUState *cs)
{
    return cs->cpu_index;
}

int kvm_arch_init(MachineState *ms, KVMState *s)
{
    /* MIPS has 128 signals */
    kvm_set_sigmask_len(s, 16);

    kvm_mips_fpu_cap = kvm_check_extension(s, KVM_CAP_MIPS_FPU);
    kvm_mips_msa_cap = kvm_check_extension(s, KVM_CAP_MIPS_MSA);

    DPRINTF("%s\n", __func__);
    return 0;
}

int kvm_arch_irqchip_create(MachineState *ms, KVMState *s)
{
    return 0;
}

int kvm_arch_init_vcpu(CPUState *cs)
{
    MIPSCPU *cpu = MIPS_CPU(cs);
    CPUMIPSState *env = &cpu->env;
    int ret = 0;

    qemu_add_vm_change_state_handler(kvm_mips_update_state, cs);

    if (kvm_mips_fpu_cap && env->CP0_Config1 & (1 << CP0C1_FP)) {
        ret = kvm_vcpu_enable_cap(cs, KVM_CAP_MIPS_FPU, 0, 0);
        if (ret < 0) {
            /* mark unsupported so it gets disabled on reset */
            kvm_mips_fpu_cap = 0;
            ret = 0;
        }
    }

    if (kvm_mips_msa_cap && env->CP0_Config3 & (1 << CP0C3_MSAP)) {
        ret = kvm_vcpu_enable_cap(cs, KVM_CAP_MIPS_MSA, 0, 0);
        if (ret < 0) {
            /* mark unsupported so it gets disabled on reset */
            kvm_mips_msa_cap = 0;
            ret = 0;
        }
    }

    DPRINTF("%s\n", __func__);
    return ret;
}

int kvm_arch_destroy_vcpu(CPUState *cs)
{
    return 0;
}

void kvm_mips_reset_vcpu(MIPSCPU *cpu)
{
    CPUMIPSState *env = &cpu->env;

    if (!kvm_mips_fpu_cap && env->CP0_Config1 & (1 << CP0C1_FP)) {
        warn_report("KVM does not support FPU, disabling");
        env->CP0_Config1 &= ~(1 << CP0C1_FP);
    }
    if (!kvm_mips_msa_cap && env->CP0_Config3 & (1 << CP0C3_MSAP)) {
        warn_report("KVM does not support MSA, disabling");
        env->CP0_Config3 &= ~(1 << CP0C3_MSAP);
    }

    DPRINTF("%s\n", __func__);
}

int kvm_arch_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
{
    DPRINTF("%s\n", __func__);
    return 0;
}

int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
{
    DPRINTF("%s\n", __func__);
    return 0;
}
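
/*
 * Check whether the I/O interrupt (hardware interrupt 0, i.e. the Cause.IP2
 * bit) is pending in the guest's CP0_Cause register.
 */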
static inline int cpu_mips_io_interrupts_pending(MIPSCPU *cpu)
{
    CPUMIPSState *env = &cpu->env;

    return env->CP0_Cause & (0x1 << (2 + CP0Ca_IP));
}


void kvm_arch_pre_run(CPUState *cs, struct kvm_run *run)
{
    MIPSCPU *cpu = MIPS_CPU(cs);
    int r;
    struct kvm_mips_interrupt intr;

    qemu_mutex_lock_iothread();

    if ((cs->interrupt_request & CPU_INTERRUPT_HARD) &&
            cpu_mips_io_interrupts_pending(cpu)) {
        intr.cpu = -1;
        intr.irq = 2;
        r = kvm_vcpu_ioctl(cs, KVM_INTERRUPT, &intr);
        if (r < 0) {
            error_report("%s: cpu %d: failed to inject IRQ %x",
                         __func__, cs->cpu_index, intr.irq);
        }
    }

    qemu_mutex_unlock_iothread();
}

MemTxAttrs kvm_arch_post_run(CPUState *cs, struct kvm_run *run)
{
    return MEMTXATTRS_UNSPECIFIED;
}

int kvm_arch_process_async_events(CPUState *cs)
{
    return cs->halted;
}

int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
{
    int ret;

    DPRINTF("%s\n", __func__);
    switch (run->exit_reason) {
    default:
        error_report("%s: unknown exit reason %d",
                     __func__, run->exit_reason);
        ret = -1;
        break;
    }

    return ret;
}

bool kvm_arch_stop_on_emulation_error(CPUState *cs)
{
    DPRINTF("%s\n", __func__);
    return true;
}

void kvm_arch_init_irq_routing(KVMState *s)
{
}

int kvm_mips_set_interrupt(MIPSCPU *cpu, int irq, int level)
{
    CPUState *cs = CPU(cpu);
    struct kvm_mips_interrupt intr;

    if (!kvm_enabled()) {
        return 0;
    }

    intr.cpu = -1;

    if (level) {
        intr.irq = irq;
    } else {
        intr.irq = -irq;
    }

    kvm_vcpu_ioctl(cs, KVM_INTERRUPT, &intr);

    return 0;
}

int kvm_mips_set_ipi_interrupt(MIPSCPU *cpu, int irq, int level)
{
    CPUState *cs = current_cpu;
    CPUState *dest_cs = CPU(cpu);
    struct kvm_mips_interrupt intr;

    if (!kvm_enabled()) {
        return 0;
    }

    intr.cpu = dest_cs->cpu_index;

    if (level) {
        intr.irq = irq;
    } else {
        intr.irq = -irq;
    }

    DPRINTF("%s: CPU %d, IRQ: %d\n", __func__, intr.cpu, intr.irq);

    kvm_vcpu_ioctl(cs, KVM_INTERRUPT, &intr);

    return 0;
}

#define MIPS_CP0_32(_R, _S) \
    (KVM_REG_MIPS_CP0 | KVM_REG_SIZE_U32 | (8 * (_R) + (_S)))

#define MIPS_CP0_64(_R, _S) \
    (KVM_REG_MIPS_CP0 | KVM_REG_SIZE_U64 | (8 * (_R) + (_S)))

#define KVM_REG_MIPS_CP0_INDEX          MIPS_CP0_32(0, 0)
#define KVM_REG_MIPS_CP0_CONTEXT        MIPS_CP0_64(4, 0)
#define KVM_REG_MIPS_CP0_USERLOCAL      MIPS_CP0_64(4, 2)
#define KVM_REG_MIPS_CP0_PAGEMASK       MIPS_CP0_32(5, 0)
#define KVM_REG_MIPS_CP0_WIRED          MIPS_CP0_32(6, 0)
#define KVM_REG_MIPS_CP0_HWRENA         MIPS_CP0_32(7, 0)
#define KVM_REG_MIPS_CP0_BADVADDR       MIPS_CP0_64(8, 0)
#define KVM_REG_MIPS_CP0_COUNT          MIPS_CP0_32(9, 0)
#define KVM_REG_MIPS_CP0_ENTRYHI        MIPS_CP0_64(10, 0)
#define KVM_REG_MIPS_CP0_COMPARE        MIPS_CP0_32(11, 0)
#define KVM_REG_MIPS_CP0_STATUS         MIPS_CP0_32(12, 0)
#define KVM_REG_MIPS_CP0_CAUSE          MIPS_CP0_32(13, 0)
#define KVM_REG_MIPS_CP0_EPC            MIPS_CP0_64(14, 0)
#define KVM_REG_MIPS_CP0_PRID           MIPS_CP0_32(15, 0)
#define KVM_REG_MIPS_CP0_CONFIG         MIPS_CP0_32(16, 0)
#define KVM_REG_MIPS_CP0_CONFIG1        MIPS_CP0_32(16, 1)
#define KVM_REG_MIPS_CP0_CONFIG2        MIPS_CP0_32(16, 2)
#define KVM_REG_MIPS_CP0_CONFIG3        MIPS_CP0_32(16, 3)
#define KVM_REG_MIPS_CP0_CONFIG4        MIPS_CP0_32(16, 4)
#define KVM_REG_MIPS_CP0_CONFIG5        MIPS_CP0_32(16, 5)
#define KVM_REG_MIPS_CP0_ERROREPC       MIPS_CP0_64(30, 0)
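
/*
 * Accessors for the KVM_GET_ONE_REG/KVM_SET_ONE_REG ioctls.  kvm_one_reg.addr
 * carries a host pointer to the value, whose width must match the
 * KVM_REG_SIZE encoded in the register id, so target_ulong values are staged
 * through a 64-bit temporary.
 */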
static inline int kvm_mips_put_one_reg(CPUState *cs, uint64_t reg_id,
                                       int32_t *addr)
{
    struct kvm_one_reg cp0reg = {
        .id = reg_id,
        .addr = (uintptr_t)addr
    };

    return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &cp0reg);
}

static inline int kvm_mips_put_one_ureg(CPUState *cs, uint64_t reg_id,
                                        uint32_t *addr)
{
    struct kvm_one_reg cp0reg = {
        .id = reg_id,
        .addr = (uintptr_t)addr
    };

    return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &cp0reg);
}

static inline int kvm_mips_put_one_ulreg(CPUState *cs, uint64_t reg_id,
                                         target_ulong *addr)
{
    uint64_t val64 = *addr;
    struct kvm_one_reg cp0reg = {
        .id = reg_id,
        .addr = (uintptr_t)&val64
    };

    return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &cp0reg);
}

static inline int kvm_mips_put_one_reg64(CPUState *cs, uint64_t reg_id,
                                         int64_t *addr)
{
    struct kvm_one_reg cp0reg = {
        .id = reg_id,
        .addr = (uintptr_t)addr
    };

    return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &cp0reg);
}

static inline int kvm_mips_put_one_ureg64(CPUState *cs, uint64_t reg_id,
                                          uint64_t *addr)
{
    struct kvm_one_reg cp0reg = {
        .id = reg_id,
        .addr = (uintptr_t)addr
    };

    return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &cp0reg);
}

static inline int kvm_mips_get_one_reg(CPUState *cs, uint64_t reg_id,
                                       int32_t *addr)
{
    struct kvm_one_reg cp0reg = {
        .id = reg_id,
        .addr = (uintptr_t)addr
    };

    return kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &cp0reg);
}

static inline int kvm_mips_get_one_ureg(CPUState *cs, uint64_t reg_id,
                                        uint32_t *addr)
{
    struct kvm_one_reg cp0reg = {
        .id = reg_id,
        .addr = (uintptr_t)addr
    };

    return kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &cp0reg);
}

static inline int kvm_mips_get_one_ulreg(CPUState *cs, uint64_t reg_id,
                                         target_ulong *addr)
{
    int ret;
    uint64_t val64 = 0;
    struct kvm_one_reg cp0reg = {
        .id = reg_id,
        .addr = (uintptr_t)&val64
    };

    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &cp0reg);
    if (ret >= 0) {
        *addr = val64;
    }
    return ret;
}

static inline int kvm_mips_get_one_reg64(CPUState *cs, uint64_t reg_id,
                                         int64_t *addr)
{
    struct kvm_one_reg cp0reg = {
        .id = reg_id,
        .addr = (uintptr_t)addr
    };

    return kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &cp0reg);
}

static inline int kvm_mips_get_one_ureg64(CPUState *cs, uint64_t reg_id,
                                          uint64_t *addr)
{
    struct kvm_one_reg cp0reg = {
        .id = reg_id,
        .addr = (uintptr_t)addr
    };

    return kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &cp0reg);
}
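
/*
 * Bit masks of the Config register fields that QEMU may legitimately change;
 * kvm_mips_change_one_reg() below only writes back bits covered by the
 * corresponding mask.
 */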
#define KVM_REG_MIPS_CP0_CONFIG_MASK    (1U << CP0C0_M)
#define KVM_REG_MIPS_CP0_CONFIG1_MASK   ((1U << CP0C1_M) | \
                                         (1U << CP0C1_FP))
#define KVM_REG_MIPS_CP0_CONFIG2_MASK   (1U << CP0C2_M)
#define KVM_REG_MIPS_CP0_CONFIG3_MASK   ((1U << CP0C3_M) | \
                                         (1U << CP0C3_MSAP))
#define KVM_REG_MIPS_CP0_CONFIG4_MASK   (1U << CP0C4_M)
#define KVM_REG_MIPS_CP0_CONFIG5_MASK   ((1U << CP0C5_MSAEn) | \
                                         (1U << CP0C5_UFE) | \
                                         (1U << CP0C5_FRE) | \
                                         (1U << CP0C5_UFR))

static inline int kvm_mips_change_one_reg(CPUState *cs, uint64_t reg_id,
                                          int32_t *addr, int32_t mask)
{
    int err;
    int32_t tmp, change;

    err = kvm_mips_get_one_reg(cs, reg_id, &tmp);
    if (err < 0) {
        return err;
    }

    /* only change bits in mask */
    change = (*addr ^ tmp) & mask;
    if (!change) {
        return 0;
    }

    tmp = tmp ^ change;
    return kvm_mips_put_one_reg(cs, reg_id, &tmp);
}

/*
 * We freeze the KVM timer when either the VM clock is stopped or the state is
 * saved (the state is dirty).
 */

/*
 * Save the state of the KVM timer when VM clock is stopped or state is synced
 * to QEMU.
 */
static int kvm_mips_save_count(CPUState *cs)
{
    MIPSCPU *cpu = MIPS_CPU(cs);
    CPUMIPSState *env = &cpu->env;
    uint64_t count_ctl;
    int err, ret = 0;

    /* freeze KVM timer */
    err = kvm_mips_get_one_ureg64(cs, KVM_REG_MIPS_COUNT_CTL, &count_ctl);
    if (err < 0) {
        DPRINTF("%s: Failed to get COUNT_CTL (%d)\n", __func__, err);
        ret = err;
    } else if (!(count_ctl & KVM_REG_MIPS_COUNT_CTL_DC)) {
        count_ctl |= KVM_REG_MIPS_COUNT_CTL_DC;
        err = kvm_mips_put_one_ureg64(cs, KVM_REG_MIPS_COUNT_CTL, &count_ctl);
        if (err < 0) {
            DPRINTF("%s: Failed to set COUNT_CTL.DC=1 (%d)\n", __func__, err);
            ret = err;
        }
    }

    /* read CP0_Cause */
    err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_CAUSE, &env->CP0_Cause);
    if (err < 0) {
        DPRINTF("%s: Failed to get CP0_CAUSE (%d)\n", __func__, err);
        ret = err;
    }

    /* read CP0_Count */
    err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_COUNT, &env->CP0_Count);
    if (err < 0) {
        DPRINTF("%s: Failed to get CP0_COUNT (%d)\n", __func__, err);
        ret = err;
    }

    return ret;
}

/*
 * Restore the state of the KVM timer when VM clock is restarted or state is
 * synced to KVM.
 */
static int kvm_mips_restore_count(CPUState *cs)
{
    MIPSCPU *cpu = MIPS_CPU(cs);
    CPUMIPSState *env = &cpu->env;
    uint64_t count_ctl;
    int err_dc, err, ret = 0;

    /* check the timer is frozen */
    err_dc = kvm_mips_get_one_ureg64(cs, KVM_REG_MIPS_COUNT_CTL, &count_ctl);
    if (err_dc < 0) {
        DPRINTF("%s: Failed to get COUNT_CTL (%d)\n", __func__, err_dc);
        ret = err_dc;
    } else if (!(count_ctl & KVM_REG_MIPS_COUNT_CTL_DC)) {
        /* freeze timer (sets COUNT_RESUME for us) */
        count_ctl |= KVM_REG_MIPS_COUNT_CTL_DC;
        err = kvm_mips_put_one_ureg64(cs, KVM_REG_MIPS_COUNT_CTL, &count_ctl);
        if (err < 0) {
            DPRINTF("%s: Failed to set COUNT_CTL.DC=1 (%d)\n", __func__, err);
            ret = err;
        }
    }

    /* load CP0_Cause */
    err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_CAUSE, &env->CP0_Cause);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_CAUSE (%d)\n", __func__, err);
        ret = err;
    }

    /* load CP0_Count */
    err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_COUNT, &env->CP0_Count);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_COUNT (%d)\n", __func__, err);
        ret = err;
    }

    /* resume KVM timer */
    if (err_dc >= 0) {
        count_ctl &= ~KVM_REG_MIPS_COUNT_CTL_DC;
        err = kvm_mips_put_one_ureg64(cs, KVM_REG_MIPS_COUNT_CTL, &count_ctl);
        if (err < 0) {
            DPRINTF("%s: Failed to set COUNT_CTL.DC=0 (%d)\n", __func__, err);
            ret = err;
        }
    }

    return ret;
}

/*
 * Handle the VM clock being started or stopped
 */
static void kvm_mips_update_state(void *opaque, int running, RunState state)
{
    CPUState *cs = opaque;
    int ret;
    uint64_t count_resume;

    /*
     * If state is already dirty (synced to QEMU) then the KVM timer state is
     * already saved and can be restored when it is synced back to KVM.
     */
    if (!running) {
        if (!cs->vcpu_dirty) {
            ret = kvm_mips_save_count(cs);
            if (ret < 0) {
                warn_report("Failed saving count");
            }
        }
    } else {
        /* Set clock restore time to now */
        count_resume = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
        ret = kvm_mips_put_one_ureg64(cs, KVM_REG_MIPS_COUNT_RESUME,
                                      &count_resume);
        if (ret < 0) {
            warn_report("Failed setting COUNT_RESUME");
            return;
        }

        if (!cs->vcpu_dirty) {
            ret = kvm_mips_restore_count(cs);
            if (ret < 0) {
                warn_report("Failed restoring count");
            }
        }
    }
}
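
/*
 * Copy FPU and MSA register state from the QEMU CPU state into KVM.  The
 * read-only FIR and MSAIR control registers are only transferred on a full
 * state put (KVM_PUT_FULL_STATE).
 */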
static int kvm_mips_put_fpu_registers(CPUState *cs, int level)
{
    MIPSCPU *cpu = MIPS_CPU(cs);
    CPUMIPSState *env = &cpu->env;
    int err, ret = 0;
    unsigned int i;

    /* Only put FPU state if we're emulating a CPU with an FPU */
    if (env->CP0_Config1 & (1 << CP0C1_FP)) {
        /* FPU Control Registers */
        if (level == KVM_PUT_FULL_STATE) {
            err = kvm_mips_put_one_ureg(cs, KVM_REG_MIPS_FCR_IR,
                                        &env->active_fpu.fcr0);
            if (err < 0) {
                DPRINTF("%s: Failed to put FCR_IR (%d)\n", __func__, err);
                ret = err;
            }
        }
        err = kvm_mips_put_one_ureg(cs, KVM_REG_MIPS_FCR_CSR,
                                    &env->active_fpu.fcr31);
        if (err < 0) {
            DPRINTF("%s: Failed to put FCR_CSR (%d)\n", __func__, err);
            ret = err;
        }

        /*
         * FPU register state is a subset of MSA vector state, so don't put FPU
         * registers if we're emulating a CPU with MSA.
         */
        if (!(env->CP0_Config3 & (1 << CP0C3_MSAP))) {
            /* Floating point registers */
            for (i = 0; i < 32; ++i) {
                if (env->CP0_Status & (1 << CP0St_FR)) {
                    err = kvm_mips_put_one_ureg64(cs, KVM_REG_MIPS_FPR_64(i),
                                                  &env->active_fpu.fpr[i].d);
                } else {
                    err = kvm_mips_put_one_ureg(cs, KVM_REG_MIPS_FPR_32(i),
                                    &env->active_fpu.fpr[i].w[FP_ENDIAN_IDX]);
                }
                if (err < 0) {
                    DPRINTF("%s: Failed to put FPR%u (%d)\n", __func__, i, err);
                    ret = err;
                }
            }
        }
    }

    /* Only put MSA state if we're emulating a CPU with MSA */
    if (env->CP0_Config3 & (1 << CP0C3_MSAP)) {
        /* MSA Control Registers */
        if (level == KVM_PUT_FULL_STATE) {
            err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_MSA_IR,
                                       &env->msair);
            if (err < 0) {
                DPRINTF("%s: Failed to put MSA_IR (%d)\n", __func__, err);
                ret = err;
            }
        }
        err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_MSA_CSR,
                                   &env->active_tc.msacsr);
        if (err < 0) {
            DPRINTF("%s: Failed to put MSA_CSR (%d)\n", __func__, err);
            ret = err;
        }

        /* Vector registers (includes FP registers) */
        for (i = 0; i < 32; ++i) {
            /* Big endian MSA not supported by QEMU yet anyway */
            err = kvm_mips_put_one_reg64(cs, KVM_REG_MIPS_VEC_128(i),
                                         env->active_fpu.fpr[i].wr.d);
            if (err < 0) {
                DPRINTF("%s: Failed to put VEC%u (%d)\n", __func__, i, err);
                ret = err;
            }
        }
    }

    return ret;
}
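
/*
 * Copy FPU and MSA register state from KVM back into the QEMU CPU state, and
 * recompute the softfloat status flags from the restored control registers.
 */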
static int kvm_mips_get_fpu_registers(CPUState *cs)
{
    MIPSCPU *cpu = MIPS_CPU(cs);
    CPUMIPSState *env = &cpu->env;
    int err, ret = 0;
    unsigned int i;

    /* Only get FPU state if we're emulating a CPU with an FPU */
    if (env->CP0_Config1 & (1 << CP0C1_FP)) {
        /* FPU Control Registers */
        err = kvm_mips_get_one_ureg(cs, KVM_REG_MIPS_FCR_IR,
                                    &env->active_fpu.fcr0);
        if (err < 0) {
            DPRINTF("%s: Failed to get FCR_IR (%d)\n", __func__, err);
            ret = err;
        }
        err = kvm_mips_get_one_ureg(cs, KVM_REG_MIPS_FCR_CSR,
                                    &env->active_fpu.fcr31);
        if (err < 0) {
            DPRINTF("%s: Failed to get FCR_CSR (%d)\n", __func__, err);
            ret = err;
        } else {
            restore_fp_status(env);
        }

        /*
         * FPU register state is a subset of MSA vector state, so don't save
         * FPU registers if we're emulating a CPU with MSA.
         */
        if (!(env->CP0_Config3 & (1 << CP0C3_MSAP))) {
            /* Floating point registers */
            for (i = 0; i < 32; ++i) {
                if (env->CP0_Status & (1 << CP0St_FR)) {
                    err = kvm_mips_get_one_ureg64(cs, KVM_REG_MIPS_FPR_64(i),
                                                  &env->active_fpu.fpr[i].d);
                } else {
                    err = kvm_mips_get_one_ureg(cs, KVM_REG_MIPS_FPR_32(i),
                                    &env->active_fpu.fpr[i].w[FP_ENDIAN_IDX]);
                }
                if (err < 0) {
                    DPRINTF("%s: Failed to get FPR%u (%d)\n", __func__, i, err);
                    ret = err;
                }
            }
        }
    }

    /* Only get MSA state if we're emulating a CPU with MSA */
    if (env->CP0_Config3 & (1 << CP0C3_MSAP)) {
        /* MSA Control Registers */
        err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_MSA_IR,
                                   &env->msair);
        if (err < 0) {
            DPRINTF("%s: Failed to get MSA_IR (%d)\n", __func__, err);
            ret = err;
        }
        err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_MSA_CSR,
                                   &env->active_tc.msacsr);
        if (err < 0) {
            DPRINTF("%s: Failed to get MSA_CSR (%d)\n", __func__, err);
            ret = err;
        } else {
            restore_msa_fp_status(env);
        }

        /* Vector registers (includes FP registers) */
        for (i = 0; i < 32; ++i) {
            /* Big endian MSA not supported by QEMU yet anyway */
            err = kvm_mips_get_one_reg64(cs, KVM_REG_MIPS_VEC_128(i),
                                         env->active_fpu.fpr[i].wr.d);
            if (err < 0) {
                DPRINTF("%s: Failed to get VEC%u (%d)\n", __func__, i, err);
                ret = err;
            }
        }
    }

    return ret;
}
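
/*
 * Write the guest CP0 registers from the QEMU CPU state into KVM.  The timer
 * state (Count/Cause) is only restored here while the VM clock is running;
 * otherwise it is restored by kvm_mips_update_state() when the clock restarts.
 */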
static int kvm_mips_put_cp0_registers(CPUState *cs, int level)
{
    MIPSCPU *cpu = MIPS_CPU(cs);
    CPUMIPSState *env = &cpu->env;
    int err, ret = 0;

    (void)level;

    err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_INDEX, &env->CP0_Index);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_INDEX (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_CONTEXT,
                                 &env->CP0_Context);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_CONTEXT (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_USERLOCAL,
                                 &env->active_tc.CP0_UserLocal);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_USERLOCAL (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_PAGEMASK,
                               &env->CP0_PageMask);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_PAGEMASK (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_WIRED, &env->CP0_Wired);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_WIRED (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_HWRENA, &env->CP0_HWREna);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_HWRENA (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_BADVADDR,
                                 &env->CP0_BadVAddr);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_BADVADDR (%d)\n", __func__, err);
        ret = err;
    }

    /* If VM clock stopped then state will be restored when it is restarted */
    if (runstate_is_running()) {
        err = kvm_mips_restore_count(cs);
        if (err < 0) {
            ret = err;
        }
    }

    err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_ENTRYHI,
                                 &env->CP0_EntryHi);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_ENTRYHI (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_COMPARE,
                               &env->CP0_Compare);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_COMPARE (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_STATUS, &env->CP0_Status);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_STATUS (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_EPC, &env->CP0_EPC);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_EPC (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_PRID, &env->CP0_PRid);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_PRID (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_change_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG,
                                  &env->CP0_Config0,
                                  KVM_REG_MIPS_CP0_CONFIG_MASK);
    if (err < 0) {
        DPRINTF("%s: Failed to change CP0_CONFIG (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_change_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG1,
                                  &env->CP0_Config1,
                                  KVM_REG_MIPS_CP0_CONFIG1_MASK);
    if (err < 0) {
        DPRINTF("%s: Failed to change CP0_CONFIG1 (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_change_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG2,
                                  &env->CP0_Config2,
                                  KVM_REG_MIPS_CP0_CONFIG2_MASK);
    if (err < 0) {
        DPRINTF("%s: Failed to change CP0_CONFIG2 (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_change_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG3,
                                  &env->CP0_Config3,
                                  KVM_REG_MIPS_CP0_CONFIG3_MASK);
    if (err < 0) {
        DPRINTF("%s: Failed to change CP0_CONFIG3 (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_change_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG4,
                                  &env->CP0_Config4,
                                  KVM_REG_MIPS_CP0_CONFIG4_MASK);
    if (err < 0) {
        DPRINTF("%s: Failed to change CP0_CONFIG4 (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_change_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG5,
                                  &env->CP0_Config5,
                                  KVM_REG_MIPS_CP0_CONFIG5_MASK);
    if (err < 0) {
        DPRINTF("%s: Failed to change CP0_CONFIG5 (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_ERROREPC,
                                 &env->CP0_ErrorEPC);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_ERROREPC (%d)\n", __func__, err);
        ret = err;
    }

    return ret;
}
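
/*
 * Read the guest CP0 registers from KVM into the QEMU CPU state.  Count and
 * Cause are only saved here while the VM clock is running; if the clock is
 * stopped they were already saved by kvm_mips_update_state().
 */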
static int kvm_mips_get_cp0_registers(CPUState *cs)
{
    MIPSCPU *cpu = MIPS_CPU(cs);
    CPUMIPSState *env = &cpu->env;
    int err, ret = 0;

    err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_INDEX, &env->CP0_Index);
    if (err < 0) {
        DPRINTF("%s: Failed to get CP0_INDEX (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_CONTEXT,
                                 &env->CP0_Context);
    if (err < 0) {
        DPRINTF("%s: Failed to get CP0_CONTEXT (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_USERLOCAL,
                                 &env->active_tc.CP0_UserLocal);
    if (err < 0) {
        DPRINTF("%s: Failed to get CP0_USERLOCAL (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_PAGEMASK,
                               &env->CP0_PageMask);
    if (err < 0) {
        DPRINTF("%s: Failed to get CP0_PAGEMASK (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_WIRED, &env->CP0_Wired);
    if (err < 0) {
        DPRINTF("%s: Failed to get CP0_WIRED (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_HWRENA, &env->CP0_HWREna);
    if (err < 0) {
        DPRINTF("%s: Failed to get CP0_HWRENA (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_BADVADDR,
                                 &env->CP0_BadVAddr);
    if (err < 0) {
        DPRINTF("%s: Failed to get CP0_BADVADDR (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_ENTRYHI,
                                 &env->CP0_EntryHi);
    if (err < 0) {
        DPRINTF("%s: Failed to get CP0_ENTRYHI (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_COMPARE,
                               &env->CP0_Compare);
    if (err < 0) {
        DPRINTF("%s: Failed to get CP0_COMPARE (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_STATUS, &env->CP0_Status);
    if (err < 0) {
        DPRINTF("%s: Failed to get CP0_STATUS (%d)\n", __func__, err);
        ret = err;
    }

    /* If VM clock stopped then state was already saved when it was stopped */
    if (runstate_is_running()) {
        err = kvm_mips_save_count(cs);
        if (err < 0) {
            ret = err;
        }
    }

    err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_EPC, &env->CP0_EPC);
    if (err < 0) {
        DPRINTF("%s: Failed to get CP0_EPC (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_PRID, &env->CP0_PRid);
    if (err < 0) {
        DPRINTF("%s: Failed to get CP0_PRID (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG, &env->CP0_Config0);
    if (err < 0) {
        DPRINTF("%s: Failed to get CP0_CONFIG (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG1, &env->CP0_Config1);
    if (err < 0) {
        DPRINTF("%s: Failed to get CP0_CONFIG1 (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG2, &env->CP0_Config2);
    if (err < 0) {
        DPRINTF("%s: Failed to get CP0_CONFIG2 (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG3, &env->CP0_Config3);
    if (err < 0) {
        DPRINTF("%s: Failed to get CP0_CONFIG3 (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG4, &env->CP0_Config4);
    if (err < 0) {
        DPRINTF("%s: Failed to get CP0_CONFIG4 (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG5, &env->CP0_Config5);
    if (err < 0) {
        DPRINTF("%s: Failed to get CP0_CONFIG5 (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_ERROREPC,
                                 &env->CP0_ErrorEPC);
    if (err < 0) {
        DPRINTF("%s: Failed to get CP0_ERROREPC (%d)\n", __func__, err);
        ret = err;
    }

    return ret;
}
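
/*
 * Top-level register synchronisation: the general-purpose registers, HI/LO
 * and PC go through KVM_SET_REGS/KVM_GET_REGS, followed by the CP0 and
 * FPU/MSA state.
 */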
int kvm_arch_put_registers(CPUState *cs, int level)
{
    MIPSCPU *cpu = MIPS_CPU(cs);
    CPUMIPSState *env = &cpu->env;
    struct kvm_regs regs;
    int ret;
    int i;

    /* Set the registers based on QEMU's view of things */
    for (i = 0; i < 32; i++) {
        regs.gpr[i] = (int64_t)(target_long)env->active_tc.gpr[i];
    }

    regs.hi = (int64_t)(target_long)env->active_tc.HI[0];
    regs.lo = (int64_t)(target_long)env->active_tc.LO[0];
    regs.pc = (int64_t)(target_long)env->active_tc.PC;

    ret = kvm_vcpu_ioctl(cs, KVM_SET_REGS, &regs);

    if (ret < 0) {
        return ret;
    }

    ret = kvm_mips_put_cp0_registers(cs, level);
    if (ret < 0) {
        return ret;
    }

    ret = kvm_mips_put_fpu_registers(cs, level);
    if (ret < 0) {
        return ret;
    }

    return ret;
}

int kvm_arch_get_registers(CPUState *cs)
{
    MIPSCPU *cpu = MIPS_CPU(cs);
    CPUMIPSState *env = &cpu->env;
    int ret = 0;
    struct kvm_regs regs;
    int i;

    /* Get the current register set as KVM sees it */
    ret = kvm_vcpu_ioctl(cs, KVM_GET_REGS, &regs);

    if (ret < 0) {
        return ret;
    }

    for (i = 0; i < 32; i++) {
        env->active_tc.gpr[i] = regs.gpr[i];
    }

    env->active_tc.HI[0] = regs.hi;
    env->active_tc.LO[0] = regs.lo;
    env->active_tc.PC = regs.pc;

    kvm_mips_get_cp0_registers(cs);
    kvm_mips_get_fpu_registers(cs);

    return ret;
}

int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route,
                             uint64_t address, uint32_t data, PCIDevice *dev)
{
    return 0;
}

int kvm_arch_add_msi_route_post(struct kvm_irq_routing_entry *route,
                                int vector, PCIDevice *dev)
{
    return 0;
}

int kvm_arch_release_virq_post(int virq)
{
    return 0;
}

int kvm_arch_msi_data_to_gsi(uint32_t data)
{
    abort();
}