/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS: MIPS specific KVM APIs
 *
 * Copyright (C) 2012-2014 Imagination Technologies Ltd.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */

#include "qemu/osdep.h"
#include <sys/ioctl.h>

#include <linux/kvm.h>

#include "qemu-common.h"
#include "cpu.h"
#include "qemu/error-report.h"
#include "qemu/timer.h"
#include "sysemu/sysemu.h"
#include "sysemu/kvm.h"
#include "sysemu/cpus.h"
#include "kvm_mips.h"
#include "exec/memattrs.h"

#define DEBUG_KVM 0

#define DPRINTF(fmt, ...) \
    do { if (DEBUG_KVM) { fprintf(stderr, fmt, ## __VA_ARGS__); } } while (0)

static int kvm_mips_fpu_cap;
static int kvm_mips_msa_cap;

const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
    KVM_CAP_LAST_INFO
};

static void kvm_mips_update_state(void *opaque, int running, RunState state);

unsigned long kvm_arch_vcpu_id(CPUState *cs)
{
    return cs->cpu_index;
}

int kvm_arch_init(MachineState *ms, KVMState *s)
{
    /* MIPS has 128 signals */
    kvm_set_sigmask_len(s, 16);

    kvm_mips_fpu_cap = kvm_check_extension(s, KVM_CAP_MIPS_FPU);
    kvm_mips_msa_cap = kvm_check_extension(s, KVM_CAP_MIPS_MSA);

    DPRINTF("%s\n", __func__);
    return 0;
}

int kvm_arch_irqchip_create(MachineState *ms, KVMState *s)
{
    return 0;
}

int kvm_arch_init_vcpu(CPUState *cs)
{
    MIPSCPU *cpu = MIPS_CPU(cs);
    CPUMIPSState *env = &cpu->env;
    int ret = 0;

    qemu_add_vm_change_state_handler(kvm_mips_update_state, cs);

    if (kvm_mips_fpu_cap && env->CP0_Config1 & (1 << CP0C1_FP)) {
        ret = kvm_vcpu_enable_cap(cs, KVM_CAP_MIPS_FPU, 0, 0);
        if (ret < 0) {
            /* mark unsupported so it gets disabled on reset */
            kvm_mips_fpu_cap = 0;
            ret = 0;
        }
    }

    if (kvm_mips_msa_cap && env->CP0_Config3 & (1 << CP0C3_MSAP)) {
        ret = kvm_vcpu_enable_cap(cs, KVM_CAP_MIPS_MSA, 0, 0);
        if (ret < 0) {
            /* mark unsupported so it gets disabled on reset */
            kvm_mips_msa_cap = 0;
            ret = 0;
        }
    }

    DPRINTF("%s\n", __func__);
    return ret;
}

void kvm_mips_reset_vcpu(MIPSCPU *cpu)
{
    CPUMIPSState *env = &cpu->env;

    if (!kvm_mips_fpu_cap && env->CP0_Config1 & (1 << CP0C1_FP)) {
        fprintf(stderr, "Warning: KVM does not support FPU, disabling\n");
        env->CP0_Config1 &= ~(1 << CP0C1_FP);
    }
    if (!kvm_mips_msa_cap && env->CP0_Config3 & (1 << CP0C3_MSAP)) {
        fprintf(stderr, "Warning: KVM does not support MSA, disabling\n");
        env->CP0_Config3 &= ~(1 << CP0C3_MSAP);
    }

    DPRINTF("%s\n", __func__);
}

int kvm_arch_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
{
    DPRINTF("%s\n", __func__);
    return 0;
}

int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
{
    DPRINTF("%s\n", __func__);
    return 0;
}

static inline int cpu_mips_io_interrupts_pending(MIPSCPU *cpu)
{
    CPUMIPSState *env = &cpu->env;

    return env->CP0_Cause & (0x1 << (2 + CP0Ca_IP));
}

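/*
 * Before entering KVM_RUN, inject any pending I/O interrupt into the guest.
 * Only IRQ 2 (hardware interrupt 0, i.e. CP0_Cause.IP2) is raised here, and
 * intr.cpu = -1 appears to target the vcpu the ioctl is issued on (compare
 * kvm_mips_set_ipi_interrupt() below, which fills in a destination
 * cpu_index). Roughly:
 *
 *     struct kvm_mips_interrupt intr = { .cpu = -1, .irq = 2 };
 *     kvm_vcpu_ioctl(cs, KVM_INTERRUPT, &intr);
 *
 * The real code below additionally checks CPU_INTERRUPT_HARD and reports any
 * ioctl failure.
 */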
void kvm_arch_pre_run(CPUState *cs, struct kvm_run *run)
{
    MIPSCPU *cpu = MIPS_CPU(cs);
    int r;
    struct kvm_mips_interrupt intr;

    qemu_mutex_lock_iothread();

    if ((cs->interrupt_request & CPU_INTERRUPT_HARD) &&
            cpu_mips_io_interrupts_pending(cpu)) {
        intr.cpu = -1;
        intr.irq = 2;
        r = kvm_vcpu_ioctl(cs, KVM_INTERRUPT, &intr);
        if (r < 0) {
            error_report("%s: cpu %d: failed to inject IRQ %x",
                         __func__, cs->cpu_index, intr.irq);
        }
    }

    qemu_mutex_unlock_iothread();
}

MemTxAttrs kvm_arch_post_run(CPUState *cs, struct kvm_run *run)
{
    return MEMTXATTRS_UNSPECIFIED;
}

int kvm_arch_process_async_events(CPUState *cs)
{
    return cs->halted;
}

int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
{
    int ret;

    DPRINTF("%s\n", __func__);
    switch (run->exit_reason) {
    default:
        error_report("%s: unknown exit reason %d",
                     __func__, run->exit_reason);
        ret = -1;
        break;
    }

    return ret;
}

bool kvm_arch_stop_on_emulation_error(CPUState *cs)
{
    DPRINTF("%s\n", __func__);
    return true;
}

int kvm_arch_on_sigbus_vcpu(CPUState *cs, int code, void *addr)
{
    DPRINTF("%s\n", __func__);
    return 1;
}

int kvm_arch_on_sigbus(int code, void *addr)
{
    DPRINTF("%s\n", __func__);
    return 1;
}

void kvm_arch_init_irq_routing(KVMState *s)
{
}

int kvm_mips_set_interrupt(MIPSCPU *cpu, int irq, int level)
{
    CPUState *cs = CPU(cpu);
    struct kvm_mips_interrupt intr;

    if (!kvm_enabled()) {
        return 0;
    }

    intr.cpu = -1;

    if (level) {
        intr.irq = irq;
    } else {
        intr.irq = -irq;
    }

    kvm_vcpu_ioctl(cs, KVM_INTERRUPT, &intr);

    return 0;
}

int kvm_mips_set_ipi_interrupt(MIPSCPU *cpu, int irq, int level)
{
    CPUState *cs = current_cpu;
    CPUState *dest_cs = CPU(cpu);
    struct kvm_mips_interrupt intr;

    if (!kvm_enabled()) {
        return 0;
    }

    intr.cpu = dest_cs->cpu_index;

    if (level) {
        intr.irq = irq;
    } else {
        intr.irq = -irq;
    }

    DPRINTF("%s: CPU %d, IRQ: %d\n", __func__, intr.cpu, intr.irq);

    kvm_vcpu_ioctl(cs, KVM_INTERRUPT, &intr);

    return 0;
}

#define MIPS_CP0_32(_R, _S)                                     \
    (KVM_REG_MIPS_CP0 | KVM_REG_SIZE_U32 | (8 * (_R) + (_S)))

#define MIPS_CP0_64(_R, _S)                                     \
    (KVM_REG_MIPS_CP0 | KVM_REG_SIZE_U64 | (8 * (_R) + (_S)))

#define KVM_REG_MIPS_CP0_INDEX          MIPS_CP0_32(0, 0)
#define KVM_REG_MIPS_CP0_CONTEXT        MIPS_CP0_64(4, 0)
#define KVM_REG_MIPS_CP0_USERLOCAL      MIPS_CP0_64(4, 2)
#define KVM_REG_MIPS_CP0_PAGEMASK       MIPS_CP0_32(5, 0)
#define KVM_REG_MIPS_CP0_WIRED          MIPS_CP0_32(6, 0)
#define KVM_REG_MIPS_CP0_HWRENA         MIPS_CP0_32(7, 0)
#define KVM_REG_MIPS_CP0_BADVADDR       MIPS_CP0_64(8, 0)
#define KVM_REG_MIPS_CP0_COUNT          MIPS_CP0_32(9, 0)
#define KVM_REG_MIPS_CP0_ENTRYHI        MIPS_CP0_64(10, 0)
#define KVM_REG_MIPS_CP0_COMPARE        MIPS_CP0_32(11, 0)
#define KVM_REG_MIPS_CP0_STATUS         MIPS_CP0_32(12, 0)
#define KVM_REG_MIPS_CP0_CAUSE          MIPS_CP0_32(13, 0)
#define KVM_REG_MIPS_CP0_EPC            MIPS_CP0_64(14, 0)
#define KVM_REG_MIPS_CP0_PRID           MIPS_CP0_32(15, 0)
#define KVM_REG_MIPS_CP0_CONFIG         MIPS_CP0_32(16, 0)
#define KVM_REG_MIPS_CP0_CONFIG1        MIPS_CP0_32(16, 1)
#define KVM_REG_MIPS_CP0_CONFIG2        MIPS_CP0_32(16, 2)
#define KVM_REG_MIPS_CP0_CONFIG3        MIPS_CP0_32(16, 3)
#define KVM_REG_MIPS_CP0_CONFIG4        MIPS_CP0_32(16, 4)
#define KVM_REG_MIPS_CP0_CONFIG5        MIPS_CP0_32(16, 5)
#define KVM_REG_MIPS_CP0_ERROREPC       MIPS_CP0_64(30, 0)

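/*
 * For illustration only: the one-reg ids above pack the CP0 register number
 * and select field into the low bits, e.g.
 *
 *     KVM_REG_MIPS_CP0_STATUS
 *         == KVM_REG_MIPS_CP0 | KVM_REG_SIZE_U32 | (8 * 12 + 0)
 *
 * i.e. CP0 register 12, select 0, accessed as a 32-bit value. The helpers
 * below simply wrap KVM_GET_ONE_REG/KVM_SET_ONE_REG around such ids.
 */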
static inline int kvm_mips_put_one_reg(CPUState *cs, uint64_t reg_id,
                                       int32_t *addr)
{
    struct kvm_one_reg cp0reg = {
        .id = reg_id,
        .addr = (uintptr_t)addr
    };

    return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &cp0reg);
}

static inline int kvm_mips_put_one_ureg(CPUState *cs, uint64_t reg_id,
                                        uint32_t *addr)
{
    struct kvm_one_reg cp0reg = {
        .id = reg_id,
        .addr = (uintptr_t)addr
    };

    return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &cp0reg);
}

static inline int kvm_mips_put_one_ulreg(CPUState *cs, uint64_t reg_id,
                                         target_ulong *addr)
{
    uint64_t val64 = *addr;
    struct kvm_one_reg cp0reg = {
        .id = reg_id,
        .addr = (uintptr_t)&val64
    };

    return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &cp0reg);
}

static inline int kvm_mips_put_one_reg64(CPUState *cs, uint64_t reg_id,
                                         int64_t *addr)
{
    struct kvm_one_reg cp0reg = {
        .id = reg_id,
        .addr = (uintptr_t)addr
    };

    return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &cp0reg);
}

static inline int kvm_mips_put_one_ureg64(CPUState *cs, uint64_t reg_id,
                                          uint64_t *addr)
{
    struct kvm_one_reg cp0reg = {
        .id = reg_id,
        .addr = (uintptr_t)addr
    };

    return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &cp0reg);
}

static inline int kvm_mips_get_one_reg(CPUState *cs, uint64_t reg_id,
                                       int32_t *addr)
{
    struct kvm_one_reg cp0reg = {
        .id = reg_id,
        .addr = (uintptr_t)addr
    };

    return kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &cp0reg);
}

static inline int kvm_mips_get_one_ureg(CPUState *cs, uint64_t reg_id,
                                        uint32_t *addr)
{
    struct kvm_one_reg cp0reg = {
        .id = reg_id,
        .addr = (uintptr_t)addr
    };

    return kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &cp0reg);
}

static inline int kvm_mips_get_one_ulreg(CPUState *cs, uint64_t reg_id,
                                         target_ulong *addr)
{
    int ret;
    uint64_t val64 = 0;
    struct kvm_one_reg cp0reg = {
        .id = reg_id,
        .addr = (uintptr_t)&val64
    };

    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &cp0reg);
    if (ret >= 0) {
        *addr = val64;
    }
    return ret;
}

static inline int kvm_mips_get_one_reg64(CPUState *cs, uint64_t reg_id,
                                         int64_t *addr)
{
    struct kvm_one_reg cp0reg = {
        .id = reg_id,
        .addr = (uintptr_t)addr
    };

    return kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &cp0reg);
}

static inline int kvm_mips_get_one_ureg64(CPUState *cs, uint64_t reg_id,
                                          uint64_t *addr)
{
    struct kvm_one_reg cp0reg = {
        .id = reg_id,
        .addr = (uintptr_t)addr
    };

    return kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &cp0reg);
}

#define KVM_REG_MIPS_CP0_CONFIG_MASK    (1U << CP0C0_M)
#define KVM_REG_MIPS_CP0_CONFIG1_MASK   ((1U << CP0C1_M) | \
                                         (1U << CP0C1_FP))
#define KVM_REG_MIPS_CP0_CONFIG2_MASK   (1U << CP0C2_M)
#define KVM_REG_MIPS_CP0_CONFIG3_MASK   ((1U << CP0C3_M) | \
                                         (1U << CP0C3_MSAP))
#define KVM_REG_MIPS_CP0_CONFIG4_MASK   (1U << CP0C4_M)
#define KVM_REG_MIPS_CP0_CONFIG5_MASK   ((1U << CP0C5_MSAEn) | \
                                         (1U << CP0C5_UFE) | \
                                         (1U << CP0C5_FRE) | \
                                         (1U << CP0C5_UFR))

static inline int kvm_mips_change_one_reg(CPUState *cs, uint64_t reg_id,
                                          int32_t *addr, int32_t mask)
{
    int err;
    int32_t tmp, change;

    err = kvm_mips_get_one_reg(cs, reg_id, &tmp);
    if (err < 0) {
        return err;
    }

    /* only change bits in mask */
    change = (*addr ^ tmp) & mask;
    if (!change) {
        return 0;
    }

    tmp = tmp ^ change;
    return kvm_mips_put_one_reg(cs, reg_id, &tmp);
}

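/*
 * kvm_mips_change_one_reg() is a masked read-modify-write: it reads the
 * register from KVM, flips only the bits in 'mask' that differ from QEMU's
 * view, and writes the result back. For example (as used by
 * kvm_mips_put_cp0_registers() below):
 *
 *     kvm_mips_change_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG1,
 *                             &env->CP0_Config1,
 *                             KVM_REG_MIPS_CP0_CONFIG1_MASK);
 *
 * so only CP0C1_M and CP0C1_FP can ever be changed and the remaining Config1
 * bits are left as KVM reports them.
 */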
/*
 * We freeze the KVM timer when either the VM clock is stopped or the state is
 * saved (the state is dirty).
 */

/*
 * Save the state of the KVM timer when VM clock is stopped or state is synced
 * to QEMU.
 */
static int kvm_mips_save_count(CPUState *cs)
{
    MIPSCPU *cpu = MIPS_CPU(cs);
    CPUMIPSState *env = &cpu->env;
    uint64_t count_ctl;
    int err, ret = 0;

    /* freeze KVM timer */
    err = kvm_mips_get_one_ureg64(cs, KVM_REG_MIPS_COUNT_CTL, &count_ctl);
    if (err < 0) {
        DPRINTF("%s: Failed to get COUNT_CTL (%d)\n", __func__, err);
        ret = err;
    } else if (!(count_ctl & KVM_REG_MIPS_COUNT_CTL_DC)) {
        count_ctl |= KVM_REG_MIPS_COUNT_CTL_DC;
        err = kvm_mips_put_one_ureg64(cs, KVM_REG_MIPS_COUNT_CTL, &count_ctl);
        if (err < 0) {
            DPRINTF("%s: Failed to set COUNT_CTL.DC=1 (%d)\n", __func__, err);
            ret = err;
        }
    }

    /* read CP0_Cause */
    err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_CAUSE, &env->CP0_Cause);
    if (err < 0) {
        DPRINTF("%s: Failed to get CP0_CAUSE (%d)\n", __func__, err);
        ret = err;
    }

    /* read CP0_Count */
    err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_COUNT, &env->CP0_Count);
    if (err < 0) {
        DPRINTF("%s: Failed to get CP0_COUNT (%d)\n", __func__, err);
        ret = err;
    }

    return ret;
}

/*
 * Restore the state of the KVM timer when VM clock is restarted or state is
 * synced to KVM.
 */
static int kvm_mips_restore_count(CPUState *cs)
{
    MIPSCPU *cpu = MIPS_CPU(cs);
    CPUMIPSState *env = &cpu->env;
    uint64_t count_ctl;
    int err_dc, err, ret = 0;

    /* check the timer is frozen */
    err_dc = kvm_mips_get_one_ureg64(cs, KVM_REG_MIPS_COUNT_CTL, &count_ctl);
    if (err_dc < 0) {
        DPRINTF("%s: Failed to get COUNT_CTL (%d)\n", __func__, err_dc);
        ret = err_dc;
    } else if (!(count_ctl & KVM_REG_MIPS_COUNT_CTL_DC)) {
        /* freeze timer (sets COUNT_RESUME for us) */
        count_ctl |= KVM_REG_MIPS_COUNT_CTL_DC;
        err = kvm_mips_put_one_ureg64(cs, KVM_REG_MIPS_COUNT_CTL, &count_ctl);
        if (err < 0) {
            DPRINTF("%s: Failed to set COUNT_CTL.DC=1 (%d)\n", __func__, err);
            ret = err;
        }
    }

    /* load CP0_Cause */
    err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_CAUSE, &env->CP0_Cause);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_CAUSE (%d)\n", __func__, err);
        ret = err;
    }

    /* load CP0_Count */
    err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_COUNT, &env->CP0_Count);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_COUNT (%d)\n", __func__, err);
        ret = err;
    }

    /* resume KVM timer */
    if (err_dc >= 0) {
        count_ctl &= ~KVM_REG_MIPS_COUNT_CTL_DC;
        err = kvm_mips_put_one_ureg64(cs, KVM_REG_MIPS_COUNT_CTL, &count_ctl);
        if (err < 0) {
            DPRINTF("%s: Failed to set COUNT_CTL.DC=0 (%d)\n", __func__, err);
            ret = err;
        }
    }

    return ret;
}

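/*
 * Rough summary of the save/restore protocol above, assuming the timer
 * registers behave as documented in the kernel's KVM API:
 *
 *     save:    set COUNT_CTL.DC      (freeze, Count stops ticking)
 *              read CP0_Cause and CP0_Count into env
 *     restore: ensure COUNT_CTL.DC is set
 *              write CP0_Cause and CP0_Count from env
 *              clear COUNT_CTL.DC    (Count resumes ticking)
 *
 * kvm_mips_update_state() below also writes KVM_REG_MIPS_COUNT_RESUME when
 * the VM clock restarts so that the resume reference point is "now".
 */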
/*
 * Handle the VM clock being started or stopped
 */
static void kvm_mips_update_state(void *opaque, int running, RunState state)
{
    CPUState *cs = opaque;
    int ret;
    uint64_t count_resume;

    /*
     * If state is already dirty (synced to QEMU) then the KVM timer state is
     * already saved and can be restored when it is synced back to KVM.
     */
    if (!running) {
        if (!cs->kvm_vcpu_dirty) {
            ret = kvm_mips_save_count(cs);
            if (ret < 0) {
                fprintf(stderr, "Failed saving count\n");
            }
        }
    } else {
        /* Set clock restore time to now */
        count_resume = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
        ret = kvm_mips_put_one_ureg64(cs, KVM_REG_MIPS_COUNT_RESUME,
                                      &count_resume);
        if (ret < 0) {
            fprintf(stderr, "Failed setting COUNT_RESUME\n");
            return;
        }

        if (!cs->kvm_vcpu_dirty) {
            ret = kvm_mips_restore_count(cs);
            if (ret < 0) {
                fprintf(stderr, "Failed restoring count\n");
            }
        }
    }
}

static int kvm_mips_put_fpu_registers(CPUState *cs, int level)
{
    MIPSCPU *cpu = MIPS_CPU(cs);
    CPUMIPSState *env = &cpu->env;
    int err, ret = 0;
    unsigned int i;

    /* Only put FPU state if we're emulating a CPU with an FPU */
    if (env->CP0_Config1 & (1 << CP0C1_FP)) {
        /* FPU Control Registers */
        if (level == KVM_PUT_FULL_STATE) {
            err = kvm_mips_put_one_ureg(cs, KVM_REG_MIPS_FCR_IR,
                                        &env->active_fpu.fcr0);
            if (err < 0) {
                DPRINTF("%s: Failed to put FCR_IR (%d)\n", __func__, err);
                ret = err;
            }
        }
        err = kvm_mips_put_one_ureg(cs, KVM_REG_MIPS_FCR_CSR,
                                    &env->active_fpu.fcr31);
        if (err < 0) {
            DPRINTF("%s: Failed to put FCR_CSR (%d)\n", __func__, err);
            ret = err;
        }

        /*
         * FPU register state is a subset of MSA vector state, so don't put
         * FPU registers if we're emulating a CPU with MSA.
         */
        if (!(env->CP0_Config3 & (1 << CP0C3_MSAP))) {
            /* Floating point registers */
            for (i = 0; i < 32; ++i) {
                if (env->CP0_Status & (1 << CP0St_FR)) {
                    err = kvm_mips_put_one_ureg64(cs, KVM_REG_MIPS_FPR_64(i),
                                                  &env->active_fpu.fpr[i].d);
                } else {
                    err = kvm_mips_put_one_ureg(cs, KVM_REG_MIPS_FPR_32(i),
                                    &env->active_fpu.fpr[i].w[FP_ENDIAN_IDX]);
                }
                if (err < 0) {
                    DPRINTF("%s: Failed to put FPR%u (%d)\n", __func__, i, err);
                    ret = err;
                }
            }
        }
    }

    /* Only put MSA state if we're emulating a CPU with MSA */
    if (env->CP0_Config3 & (1 << CP0C3_MSAP)) {
        /* MSA Control Registers */
        if (level == KVM_PUT_FULL_STATE) {
            err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_MSA_IR,
                                       &env->msair);
            if (err < 0) {
                DPRINTF("%s: Failed to put MSA_IR (%d)\n", __func__, err);
                ret = err;
            }
        }
        err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_MSA_CSR,
                                   &env->active_tc.msacsr);
        if (err < 0) {
            DPRINTF("%s: Failed to put MSA_CSR (%d)\n", __func__, err);
            ret = err;
        }

        /* Vector registers (includes FP registers) */
        for (i = 0; i < 32; ++i) {
            /* Big endian MSA not supported by QEMU yet anyway */
            err = kvm_mips_put_one_reg64(cs, KVM_REG_MIPS_VEC_128(i),
                                         env->active_fpu.fpr[i].wr.d);
            if (err < 0) {
                DPRINTF("%s: Failed to put VEC%u (%d)\n", __func__, i, err);
                ret = err;
            }
        }
    }

    return ret;
}

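/*
 * Note on the FPR transfers above and below: with CP0_Status.FR = 1 each FPR
 * is accessed as a full 64-bit register (KVM_REG_MIPS_FPR_64); with FR = 0
 * the 32-bit accessors (KVM_REG_MIPS_FPR_32) are used and FP_ENDIAN_IDX
 * selects the right 32-bit half of QEMU's 64-bit storage. When MSA is
 * present, the FPRs are the low bits of the 128-bit vector registers, so only
 * KVM_REG_MIPS_VEC_128 is transferred in that case.
 */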
static int kvm_mips_get_fpu_registers(CPUState *cs)
{
    MIPSCPU *cpu = MIPS_CPU(cs);
    CPUMIPSState *env = &cpu->env;
    int err, ret = 0;
    unsigned int i;

    /* Only get FPU state if we're emulating a CPU with an FPU */
    if (env->CP0_Config1 & (1 << CP0C1_FP)) {
        /* FPU Control Registers */
        err = kvm_mips_get_one_ureg(cs, KVM_REG_MIPS_FCR_IR,
                                    &env->active_fpu.fcr0);
        if (err < 0) {
            DPRINTF("%s: Failed to get FCR_IR (%d)\n", __func__, err);
            ret = err;
        }
        err = kvm_mips_get_one_ureg(cs, KVM_REG_MIPS_FCR_CSR,
                                    &env->active_fpu.fcr31);
        if (err < 0) {
            DPRINTF("%s: Failed to get FCR_CSR (%d)\n", __func__, err);
            ret = err;
        } else {
            restore_fp_status(env);
        }

        /*
         * FPU register state is a subset of MSA vector state, so don't get
         * FPU registers if we're emulating a CPU with MSA.
         */
        if (!(env->CP0_Config3 & (1 << CP0C3_MSAP))) {
            /* Floating point registers */
            for (i = 0; i < 32; ++i) {
                if (env->CP0_Status & (1 << CP0St_FR)) {
                    err = kvm_mips_get_one_ureg64(cs, KVM_REG_MIPS_FPR_64(i),
                                                  &env->active_fpu.fpr[i].d);
                } else {
                    err = kvm_mips_get_one_ureg(cs, KVM_REG_MIPS_FPR_32(i),
                                    &env->active_fpu.fpr[i].w[FP_ENDIAN_IDX]);
                }
                if (err < 0) {
                    DPRINTF("%s: Failed to get FPR%u (%d)\n", __func__, i, err);
                    ret = err;
                }
            }
        }
    }

    /* Only get MSA state if we're emulating a CPU with MSA */
    if (env->CP0_Config3 & (1 << CP0C3_MSAP)) {
        /* MSA Control Registers */
        err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_MSA_IR,
                                   &env->msair);
        if (err < 0) {
            DPRINTF("%s: Failed to get MSA_IR (%d)\n", __func__, err);
            ret = err;
        }
        err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_MSA_CSR,
                                   &env->active_tc.msacsr);
        if (err < 0) {
            DPRINTF("%s: Failed to get MSA_CSR (%d)\n", __func__, err);
            ret = err;
        } else {
            restore_msa_fp_status(env);
        }

        /* Vector registers (includes FP registers) */
        for (i = 0; i < 32; ++i) {
            /* Big endian MSA not supported by QEMU yet anyway */
            err = kvm_mips_get_one_reg64(cs, KVM_REG_MIPS_VEC_128(i),
                                         env->active_fpu.fpr[i].wr.d);
            if (err < 0) {
                DPRINTF("%s: Failed to get VEC%u (%d)\n", __func__, i, err);
                ret = err;
            }
        }
    }

    return ret;
}

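/*
 * Sketch of the CP0 sync-out path below: ordinary registers are pushed with
 * the put helpers; Count/Cause go through kvm_mips_restore_count(), and only
 * while the VM clock is running (otherwise they are restored when the clock
 * restarts); the Config0-5 registers go through kvm_mips_change_one_reg()
 * with the KVM_REG_MIPS_CP0_CONFIG*_MASK masks, so only the masked bits
 * (e.g. CP0C1_FP, CP0C5_MSAEn) are written back.
 */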
static int kvm_mips_put_cp0_registers(CPUState *cs, int level)
{
    MIPSCPU *cpu = MIPS_CPU(cs);
    CPUMIPSState *env = &cpu->env;
    int err, ret = 0;

    (void)level;

    err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_INDEX, &env->CP0_Index);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_INDEX (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_CONTEXT,
                                 &env->CP0_Context);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_CONTEXT (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_USERLOCAL,
                                 &env->active_tc.CP0_UserLocal);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_USERLOCAL (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_PAGEMASK,
                               &env->CP0_PageMask);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_PAGEMASK (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_WIRED, &env->CP0_Wired);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_WIRED (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_HWRENA, &env->CP0_HWREna);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_HWRENA (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_BADVADDR,
                                 &env->CP0_BadVAddr);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_BADVADDR (%d)\n", __func__, err);
        ret = err;
    }

    /* If VM clock stopped then state will be restored when it is restarted */
    if (runstate_is_running()) {
        err = kvm_mips_restore_count(cs);
        if (err < 0) {
            ret = err;
        }
    }

    err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_ENTRYHI,
                                 &env->CP0_EntryHi);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_ENTRYHI (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_COMPARE,
                               &env->CP0_Compare);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_COMPARE (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_STATUS, &env->CP0_Status);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_STATUS (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_EPC, &env->CP0_EPC);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_EPC (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_PRID, &env->CP0_PRid);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_PRID (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_change_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG,
                                  &env->CP0_Config0,
                                  KVM_REG_MIPS_CP0_CONFIG_MASK);
    if (err < 0) {
        DPRINTF("%s: Failed to change CP0_CONFIG (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_change_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG1,
                                  &env->CP0_Config1,
                                  KVM_REG_MIPS_CP0_CONFIG1_MASK);
    if (err < 0) {
        DPRINTF("%s: Failed to change CP0_CONFIG1 (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_change_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG2,
                                  &env->CP0_Config2,
                                  KVM_REG_MIPS_CP0_CONFIG2_MASK);
    if (err < 0) {
        DPRINTF("%s: Failed to change CP0_CONFIG2 (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_change_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG3,
                                  &env->CP0_Config3,
                                  KVM_REG_MIPS_CP0_CONFIG3_MASK);
    if (err < 0) {
        DPRINTF("%s: Failed to change CP0_CONFIG3 (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_change_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG4,
                                  &env->CP0_Config4,
                                  KVM_REG_MIPS_CP0_CONFIG4_MASK);
    if (err < 0) {
        DPRINTF("%s: Failed to change CP0_CONFIG4 (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_change_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG5,
                                  &env->CP0_Config5,
                                  KVM_REG_MIPS_CP0_CONFIG5_MASK);
    if (err < 0) {
        DPRINTF("%s: Failed to change CP0_CONFIG5 (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_ERROREPC,
                                 &env->CP0_ErrorEPC);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_ERROREPC (%d)\n", __func__, err);
        ret = err;
    }

    return ret;
}

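/*
 * The sync-in path below mirrors the one above: everything is read back with
 * the get helpers (the Config registers in full, since reads need no mask),
 * and Count/Cause are captured via kvm_mips_save_count() only while the VM
 * clock is running; if the clock is stopped they were already saved by
 * kvm_mips_update_state().
 */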
static int kvm_mips_get_cp0_registers(CPUState *cs)
{
    MIPSCPU *cpu = MIPS_CPU(cs);
    CPUMIPSState *env = &cpu->env;
    int err, ret = 0;

    err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_INDEX, &env->CP0_Index);
    if (err < 0) {
        DPRINTF("%s: Failed to get CP0_INDEX (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_CONTEXT,
                                 &env->CP0_Context);
    if (err < 0) {
        DPRINTF("%s: Failed to get CP0_CONTEXT (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_USERLOCAL,
                                 &env->active_tc.CP0_UserLocal);
    if (err < 0) {
        DPRINTF("%s: Failed to get CP0_USERLOCAL (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_PAGEMASK,
                               &env->CP0_PageMask);
    if (err < 0) {
        DPRINTF("%s: Failed to get CP0_PAGEMASK (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_WIRED, &env->CP0_Wired);
    if (err < 0) {
        DPRINTF("%s: Failed to get CP0_WIRED (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_HWRENA, &env->CP0_HWREna);
    if (err < 0) {
        DPRINTF("%s: Failed to get CP0_HWRENA (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_BADVADDR,
                                 &env->CP0_BadVAddr);
    if (err < 0) {
        DPRINTF("%s: Failed to get CP0_BADVADDR (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_ENTRYHI,
                                 &env->CP0_EntryHi);
    if (err < 0) {
        DPRINTF("%s: Failed to get CP0_ENTRYHI (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_COMPARE,
                               &env->CP0_Compare);
    if (err < 0) {
        DPRINTF("%s: Failed to get CP0_COMPARE (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_STATUS, &env->CP0_Status);
    if (err < 0) {
        DPRINTF("%s: Failed to get CP0_STATUS (%d)\n", __func__, err);
        ret = err;
    }

    /* If VM clock stopped then state was already saved when it was stopped */
    if (runstate_is_running()) {
        err = kvm_mips_save_count(cs);
        if (err < 0) {
            ret = err;
        }
    }

    err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_EPC, &env->CP0_EPC);
    if (err < 0) {
        DPRINTF("%s: Failed to get CP0_EPC (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_PRID, &env->CP0_PRid);
    if (err < 0) {
        DPRINTF("%s: Failed to get CP0_PRID (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG, &env->CP0_Config0);
    if (err < 0) {
        DPRINTF("%s: Failed to get CP0_CONFIG (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG1, &env->CP0_Config1);
    if (err < 0) {
        DPRINTF("%s: Failed to get CP0_CONFIG1 (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG2, &env->CP0_Config2);
    if (err < 0) {
        DPRINTF("%s: Failed to get CP0_CONFIG2 (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG3, &env->CP0_Config3);
    if (err < 0) {
        DPRINTF("%s: Failed to get CP0_CONFIG3 (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG4, &env->CP0_Config4);
    if (err < 0) {
        DPRINTF("%s: Failed to get CP0_CONFIG4 (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG5, &env->CP0_Config5);
    if (err < 0) {
        DPRINTF("%s: Failed to get CP0_CONFIG5 (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_ERROREPC,
                                 &env->CP0_ErrorEPC);
    if (err < 0) {
        DPRINTF("%s: Failed to get CP0_ERROREPC (%d)\n", __func__, err);
        ret = err;
    }

    return ret;
}

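/*
 * The two entry points below sync the general-purpose register file. Note
 * the (int64_t)(target_long) casts: struct kvm_regs on MIPS carries 64-bit
 * fields, so 32-bit guest values are sign-extended on the way out, matching
 * how a MIPS64 CPU holds 32-bit values in 64-bit registers.
 */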
int kvm_arch_put_registers(CPUState *cs, int level)
{
    MIPSCPU *cpu = MIPS_CPU(cs);
    CPUMIPSState *env = &cpu->env;
    struct kvm_regs regs;
    int ret;
    int i;

    /* Set the registers based on QEMU's view of things */
    for (i = 0; i < 32; i++) {
        regs.gpr[i] = (int64_t)(target_long)env->active_tc.gpr[i];
    }

    regs.hi = (int64_t)(target_long)env->active_tc.HI[0];
    regs.lo = (int64_t)(target_long)env->active_tc.LO[0];
    regs.pc = (int64_t)(target_long)env->active_tc.PC;

    ret = kvm_vcpu_ioctl(cs, KVM_SET_REGS, &regs);

    if (ret < 0) {
        return ret;
    }

    ret = kvm_mips_put_cp0_registers(cs, level);
    if (ret < 0) {
        return ret;
    }

    ret = kvm_mips_put_fpu_registers(cs, level);
    if (ret < 0) {
        return ret;
    }

    return ret;
}

int kvm_arch_get_registers(CPUState *cs)
{
    MIPSCPU *cpu = MIPS_CPU(cs);
    CPUMIPSState *env = &cpu->env;
    int ret = 0;
    struct kvm_regs regs;
    int i;

    /* Get the current register set as KVM sees it */
    ret = kvm_vcpu_ioctl(cs, KVM_GET_REGS, &regs);

    if (ret < 0) {
        return ret;
    }

    for (i = 0; i < 32; i++) {
        env->active_tc.gpr[i] = regs.gpr[i];
    }

    env->active_tc.HI[0] = regs.hi;
    env->active_tc.LO[0] = regs.lo;
    env->active_tc.PC = regs.pc;

    kvm_mips_get_cp0_registers(cs);
    kvm_mips_get_fpu_registers(cs);

    return ret;
}

int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route,
                             uint64_t address, uint32_t data, PCIDevice *dev)
{
    return 0;
}

int kvm_arch_add_msi_route_post(struct kvm_irq_routing_entry *route,
                                int vector, PCIDevice *dev)
{
    return 0;
}

int kvm_arch_release_virq_post(int virq)
{
    return 0;
}

int kvm_arch_msi_data_to_gsi(uint32_t data)
{
    abort();
}