/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS: Instruction/Exception emulation
 *
 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/ktime.h>
#include <linux/kvm_host.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <linux/bootmem.h>
#include <linux/random.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <asm/cacheops.h>
#include <asm/cpu-info.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/inst.h>

#undef CONFIG_MIPS_MT
#include <asm/r4kcache.h>
#define CONFIG_MIPS_MT

#include "interrupt.h"
#include "commpage.h"

#include "trace.h"

/*
 * Compute the return address and do branch emulation, if required.
 * This function should only be called when the faulting instruction is in a
 * branch delay slot (i.e. CP0_Cause.BD is set).
 */
static int kvm_compute_return_epc(struct kvm_vcpu *vcpu, unsigned long instpc,
				  unsigned long *out)
{
	unsigned int dspcontrol;
	union mips_instruction insn;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	long epc = instpc;
	long nextpc;
	int err;

	if (epc & 3) {
		kvm_err("%s: unaligned epc\n", __func__);
		return -EINVAL;
	}

	/* Read the instruction */
	err = kvm_get_badinstrp((u32 *)epc, vcpu, &insn.word);
	if (err)
		return err;

	switch (insn.i_format.opcode) {
		/* jr and jalr are in r_format format. */
	case spec_op:
		switch (insn.r_format.func) {
		case jalr_op:
			arch->gprs[insn.r_format.rd] = epc + 8;
			/* Fall through */
		case jr_op:
			nextpc = arch->gprs[insn.r_format.rs];
			break;
		default:
			return -EINVAL;
		}
		break;

		/*
		 * This group contains:
		 * bltz_op, bgez_op, bltzl_op, bgezl_op,
		 * bltzal_op, bgezal_op, bltzall_op, bgezall_op.
		 */
	case bcond_op:
		switch (insn.i_format.rt) {
		case bltz_op:
		case bltzl_op:
			if ((long)arch->gprs[insn.i_format.rs] < 0)
				epc = epc + 4 + (insn.i_format.simmediate << 2);
			else
				epc += 8;
			nextpc = epc;
			break;

		case bgez_op:
		case bgezl_op:
			if ((long)arch->gprs[insn.i_format.rs] >= 0)
				epc = epc + 4 + (insn.i_format.simmediate << 2);
			else
				epc += 8;
			nextpc = epc;
			break;

		case bltzal_op:
		case bltzall_op:
			arch->gprs[31] = epc + 8;
			if ((long)arch->gprs[insn.i_format.rs] < 0)
				epc = epc + 4 + (insn.i_format.simmediate << 2);
			else
				epc += 8;
			nextpc = epc;
			break;

		case bgezal_op:
		case bgezall_op:
			arch->gprs[31] = epc + 8;
			if ((long)arch->gprs[insn.i_format.rs] >= 0)
				epc = epc + 4 + (insn.i_format.simmediate << 2);
			else
				epc += 8;
			nextpc = epc;
			break;
		case bposge32_op:
			if (!cpu_has_dsp) {
				kvm_err("%s: DSP branch but not DSP ASE\n",
					__func__);
				return -EINVAL;
			}

			dspcontrol = rddsp(0x01);

			if (dspcontrol >= 32)
				epc = epc + 4 + (insn.i_format.simmediate << 2);
			else
				epc += 8;
			nextpc = epc;
			break;
		default:
			return -EINVAL;
		}
		break;

		/* These are unconditional and in j_format. */
	case jal_op:
		arch->gprs[31] = instpc + 8;
	case j_op:
		epc += 4;
		epc >>= 28;
		epc <<= 28;
		epc |= (insn.j_format.target << 2);
		nextpc = epc;
		break;

	/* These are conditional and in i_format. */
	case beq_op:
	case beql_op:
		if (arch->gprs[insn.i_format.rs] ==
		    arch->gprs[insn.i_format.rt])
			epc = epc + 4 + (insn.i_format.simmediate << 2);
		else
			epc += 8;
		nextpc = epc;
		break;

	case bne_op:
	case bnel_op:
		if (arch->gprs[insn.i_format.rs] !=
		    arch->gprs[insn.i_format.rt])
			epc = epc + 4 + (insn.i_format.simmediate << 2);
		else
			epc += 8;
		nextpc = epc;
		break;

	case blez_op:	/* POP06 */
#ifndef CONFIG_CPU_MIPSR6
	case blezl_op:	/* removed in R6 */
#endif
		if (insn.i_format.rt != 0)
			goto compact_branch;
		if ((long)arch->gprs[insn.i_format.rs] <= 0)
			epc = epc + 4 + (insn.i_format.simmediate << 2);
		else
			epc += 8;
		nextpc = epc;
		break;

	case bgtz_op:	/* POP07 */
#ifndef CONFIG_CPU_MIPSR6
	case bgtzl_op:	/* removed in R6 */
#endif
		if (insn.i_format.rt != 0)
			goto compact_branch;
		if ((long)arch->gprs[insn.i_format.rs] > 0)
			epc = epc + 4 + (insn.i_format.simmediate << 2);
		else
			epc += 8;
		nextpc = epc;
		break;

	/* And now the FPA/cp1 branch instructions. */
	case cop1_op:
		kvm_err("%s: unsupported cop1_op\n", __func__);
		return -EINVAL;

#ifdef CONFIG_CPU_MIPSR6
	/* R6 added the following compact branches with forbidden slots */
	case blezl_op:	/* POP26 */
	case bgtzl_op:	/* POP27 */
		/* only rt == 0 isn't compact branch */
		if (insn.i_format.rt != 0)
			goto compact_branch;
		return -EINVAL;
	case pop10_op:
	case pop30_op:
		/* only rs == rt == 0 is reserved, rest are compact branches */
		if (insn.i_format.rs != 0 || insn.i_format.rt != 0)
			goto compact_branch;
		return -EINVAL;
	case pop66_op:
	case pop76_op:
		/* only rs == 0 isn't compact branch */
		if (insn.i_format.rs != 0)
			goto compact_branch;
		return -EINVAL;
compact_branch:
		/*
		 * If we've hit an exception on the forbidden slot, then
		 * the branch must not have been taken.
		 */
		epc += 8;
		nextpc = epc;
		break;
#else
compact_branch:
		/* Fall through - Compact branches not supported before R6 */
#endif
	default:
		return -EINVAL;
	}

	*out = nextpc;
	return 0;
}

enum emulation_result update_pc(struct kvm_vcpu *vcpu, u32 cause)
{
	int err;

	if (cause & CAUSEF_BD) {
		err = kvm_compute_return_epc(vcpu, vcpu->arch.pc,
					     &vcpu->arch.pc);
		if (err)
			return EMULATE_FAIL;
	} else {
		vcpu->arch.pc += 4;
	}

	kvm_debug("update_pc(): New PC: %#lx\n", vcpu->arch.pc);

	return EMULATE_DONE;
}
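/*
 * A rough worked example of the delay slot handling above (for illustration
 * only, addresses invented): if a guest beq at PC 0x80001000 faults in its
 * delay slot (CP0_Cause.BD set), kvm_compute_return_epc() re-reads the branch
 * and resumes the guest at
 *
 *	0x80001000 + 4 + (simmediate << 2)	if the branch is taken, or
 *	0x80001000 + 8				if it is not,
 *
 * whereas a fault outside a delay slot simply resumes at PC + 4.
 */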
/**
 * kvm_get_badinstr() - Get bad instruction encoding.
 * @opc:	Guest pointer to faulting instruction.
 * @vcpu:	KVM VCPU information.
 *
 * Gets the instruction encoding of the faulting instruction, using the saved
 * BadInstr register value if it exists, otherwise falling back to reading guest
 * memory at @opc.
 *
 * Returns:	The instruction encoding of the faulting instruction.
 */
int kvm_get_badinstr(u32 *opc, struct kvm_vcpu *vcpu, u32 *out)
{
	if (cpu_has_badinstr) {
		*out = vcpu->arch.host_cp0_badinstr;
		return 0;
	} else {
		return kvm_get_inst(opc, vcpu, out);
	}
}

/**
 * kvm_get_badinstrp() - Get bad prior instruction encoding.
 * @opc:	Guest pointer to prior faulting instruction.
 * @vcpu:	KVM VCPU information.
 *
 * Gets the instruction encoding of the prior faulting instruction (the branch
 * containing the delay slot which faulted), using the saved BadInstrP register
 * value if it exists, otherwise falling back to reading guest memory at @opc.
 *
 * Returns:	The instruction encoding of the prior faulting instruction.
 */
int kvm_get_badinstrp(u32 *opc, struct kvm_vcpu *vcpu, u32 *out)
{
	if (cpu_has_badinstrp) {
		*out = vcpu->arch.host_cp0_badinstrp;
		return 0;
	} else {
		return kvm_get_inst(opc, vcpu, out);
	}
}

/**
 * kvm_mips_count_disabled() - Find whether the CP0_Count timer is disabled.
 * @vcpu:	Virtual CPU.
 *
 * Returns:	1 if the CP0_Count timer is disabled by either the guest
 *		CP0_Cause.DC bit or the count_ctl.DC bit.
 *		0 otherwise (in which case CP0_Count timer is running).
 */
static inline int kvm_mips_count_disabled(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;

	return (vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC) ||
		(kvm_read_c0_guest_cause(cop0) & CAUSEF_DC);
}

/**
 * kvm_mips_ktime_to_count() - Scale ktime_t to a 32-bit count.
 *
 * Caches the dynamic nanosecond bias in vcpu->arch.count_dyn_bias.
 *
 * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
 */
static u32 kvm_mips_ktime_to_count(struct kvm_vcpu *vcpu, ktime_t now)
{
	s64 now_ns, periods;
	u64 delta;

	now_ns = ktime_to_ns(now);
	delta = now_ns + vcpu->arch.count_dyn_bias;

	if (delta >= vcpu->arch.count_period) {
		/* If delta is out of safe range the bias needs adjusting */
		periods = div64_s64(now_ns, vcpu->arch.count_period);
		vcpu->arch.count_dyn_bias = -periods * vcpu->arch.count_period;
		/* Recalculate delta with new bias */
		delta = now_ns + vcpu->arch.count_dyn_bias;
	}

	/*
	 * We've ensured that:
	 *   delta < count_period
	 *
	 * Therefore the intermediate delta*count_hz will never overflow since
	 * at the boundary condition:
	 *   delta = count_period
	 *   delta = NSEC_PER_SEC * 2^32 / count_hz
	 *   delta * count_hz = NSEC_PER_SEC * 2^32
	 */
	return div_u64(delta * vcpu->arch.count_hz, NSEC_PER_SEC);
}
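/*
 * Worked example of the scaling above, assuming the default 100 MHz count
 * frequency set up by kvm_mips_init_count():
 *
 *	count_period = 2^32 * NSEC_PER_SEC / count_hz
 *		     = 4294967296 * 10^9 / 10^8 ns  (~42.9 s per wrap)
 *
 * so delta < count_period guarantees delta * count_hz < 2^32 * 10^9, which
 * comfortably fits in the u64 passed to div_u64().
 */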
/**
 * kvm_mips_count_time() - Get effective current time.
 * @vcpu:	Virtual CPU.
 *
 * Get effective monotonic ktime. This is usually a straightforward ktime_get(),
 * except when the master disable bit is set in count_ctl, in which case it is
 * count_resume, i.e. the time that the count was disabled.
 *
 * Returns:	Effective monotonic ktime for CP0_Count.
 */
static inline ktime_t kvm_mips_count_time(struct kvm_vcpu *vcpu)
{
	if (unlikely(vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC))
		return vcpu->arch.count_resume;

	return ktime_get();
}

/**
 * kvm_mips_read_count_running() - Read the current count value as if running.
 * @vcpu:	Virtual CPU.
 * @now:	Kernel time to read CP0_Count at.
 *
 * Returns the current guest CP0_Count register at time @now and handles any
 * timer interrupt that is pending but hasn't been handled yet.
 *
 * Returns:	The current value of the guest CP0_Count register.
 */
static u32 kvm_mips_read_count_running(struct kvm_vcpu *vcpu, ktime_t now)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	ktime_t expires, threshold;
	u32 count, compare;
	int running;

	/* Calculate the biased and scaled guest CP0_Count */
	count = vcpu->arch.count_bias + kvm_mips_ktime_to_count(vcpu, now);
	compare = kvm_read_c0_guest_compare(cop0);

	/*
	 * Find whether CP0_Count has reached the closest timer interrupt. If
	 * not, we shouldn't inject it.
	 */
	if ((s32)(count - compare) < 0)
		return count;

	/*
	 * The CP0_Count we're going to return has already reached the closest
	 * timer interrupt. Quickly check if it really is a new interrupt by
	 * looking at whether the interval until the hrtimer expiry time is
	 * less than 1/4 of the timer period.
	 */
	expires = hrtimer_get_expires(&vcpu->arch.comparecount_timer);
	threshold = ktime_add_ns(now, vcpu->arch.count_period / 4);
	if (ktime_before(expires, threshold)) {
		/*
		 * Cancel it while we handle it so there's no chance of
		 * interference with the timeout handler.
		 */
		running = hrtimer_cancel(&vcpu->arch.comparecount_timer);

		/* Nothing should be waiting on the timeout */
		kvm_mips_callbacks->queue_timer_int(vcpu);

		/*
		 * Restart the timer if it was running based on the expiry time
		 * we read, so that we don't push it back 2 periods.
		 */
		if (running) {
			expires = ktime_add_ns(expires,
					       vcpu->arch.count_period);
			hrtimer_start(&vcpu->arch.comparecount_timer, expires,
				      HRTIMER_MODE_ABS);
		}
	}

	return count;
}

/**
 * kvm_mips_read_count() - Read the current count value.
 * @vcpu:	Virtual CPU.
 *
 * Read the current guest CP0_Count value, taking into account whether the timer
 * is stopped.
 *
 * Returns:	The current guest CP0_Count value.
 */
u32 kvm_mips_read_count(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;

	/* If count disabled just read static copy of count */
	if (kvm_mips_count_disabled(vcpu))
		return kvm_read_c0_guest_count(cop0);

	return kvm_mips_read_count_running(vcpu, ktime_get());
}
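/*
 * The freeze/resume helpers below are normally used as a pair when timer
 * parameters need changing without losing a tick, roughly as follows (a
 * sketch only, mirroring what kvm_mips_set_count_hz() does further down):
 *
 *	now = kvm_mips_freeze_hrtimer(vcpu, &count);
 *	... update count_hz / count_bias / CP0_Compare ...
 *	kvm_mips_resume_hrtimer(vcpu, now, count);
 */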
/**
 * kvm_mips_freeze_hrtimer() - Safely stop the hrtimer.
 * @vcpu:	Virtual CPU.
 * @count:	Output pointer for CP0_Count value at point of freeze.
 *
 * Freeze the hrtimer safely and return both the ktime and the CP0_Count value
 * at the point it was frozen. It is guaranteed that any pending interrupts at
 * the point it was frozen are handled, and none after that point.
 *
 * This is useful where the time/CP0_Count is needed in the calculation of the
 * new parameters.
 *
 * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
 *
 * Returns:	The ktime at the point of freeze.
 */
static ktime_t kvm_mips_freeze_hrtimer(struct kvm_vcpu *vcpu, u32 *count)
{
	ktime_t now;

	/* stop hrtimer before finding time */
	hrtimer_cancel(&vcpu->arch.comparecount_timer);
	now = ktime_get();

	/* find count at this point and handle pending hrtimer */
	*count = kvm_mips_read_count_running(vcpu, now);

	return now;
}

/**
 * kvm_mips_resume_hrtimer() - Resume hrtimer, updating expiry.
 * @vcpu:	Virtual CPU.
 * @now:	ktime at point of resume.
 * @count:	CP0_Count at point of resume.
 *
 * Resumes the timer and updates the timer expiry based on @now and @count.
 * This can be used in conjunction with kvm_mips_freeze_hrtimer() when timer
 * parameters need to be changed.
 *
 * It is guaranteed that a timer interrupt immediately after resume will be
 * handled, but not if CP0_Compare is exactly at @count. That case is already
 * handled by kvm_mips_freeze_hrtimer().
 *
 * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
 */
static void kvm_mips_resume_hrtimer(struct kvm_vcpu *vcpu,
				    ktime_t now, u32 count)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	u32 compare;
	u64 delta;
	ktime_t expire;

	/* Calculate timeout (wrap 0 to 2^32) */
	compare = kvm_read_c0_guest_compare(cop0);
	delta = (u64)(u32)(compare - count - 1) + 1;
	delta = div_u64(delta * NSEC_PER_SEC, vcpu->arch.count_hz);
	expire = ktime_add_ns(now, delta);

	/* Update hrtimer to use new timeout */
	hrtimer_cancel(&vcpu->arch.comparecount_timer);
	hrtimer_start(&vcpu->arch.comparecount_timer, expire, HRTIMER_MODE_ABS);
}
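/*
 * Note on the wrap arithmetic above: computing the remaining ticks as
 * (u64)(u32)(compare - count - 1) + 1 maps a Compare value equal to the
 * current Count onto a full 2^32-tick period rather than zero. For example,
 * with count == compare the delta is 2^32 ticks (~42.9 s at the default
 * 100 MHz), while compare == count + 1 gives a delta of a single tick.
 */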
/**
 * kvm_mips_write_count() - Modify the count and update timer.
 * @vcpu:	Virtual CPU.
 * @count:	Guest CP0_Count value to set.
 *
 * Sets the CP0_Count value and updates the timer accordingly.
 */
void kvm_mips_write_count(struct kvm_vcpu *vcpu, u32 count)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	ktime_t now;

	/* Calculate bias */
	now = kvm_mips_count_time(vcpu);
	vcpu->arch.count_bias = count - kvm_mips_ktime_to_count(vcpu, now);

	if (kvm_mips_count_disabled(vcpu))
		/* The timer's disabled, adjust the static count */
		kvm_write_c0_guest_count(cop0, count);
	else
		/* Update timeout */
		kvm_mips_resume_hrtimer(vcpu, now, count);
}

/**
 * kvm_mips_init_count() - Initialise timer.
 * @vcpu:	Virtual CPU.
 *
 * Initialise the timer to a sensible frequency, namely 100MHz, zero it, and set
 * it going if it's enabled.
 */
void kvm_mips_init_count(struct kvm_vcpu *vcpu)
{
	/* 100 MHz */
	vcpu->arch.count_hz = 100*1000*1000;
	vcpu->arch.count_period = div_u64((u64)NSEC_PER_SEC << 32,
					  vcpu->arch.count_hz);
	vcpu->arch.count_dyn_bias = 0;

	/* Starting at 0 */
	kvm_mips_write_count(vcpu, 0);
}

/**
 * kvm_mips_set_count_hz() - Update the frequency of the timer.
 * @vcpu:	Virtual CPU.
 * @count_hz:	Frequency of CP0_Count timer in Hz.
 *
 * Change the frequency of the CP0_Count timer. This is done atomically so that
 * CP0_Count is continuous and no timer interrupt is lost.
 *
 * Returns:	-EINVAL if @count_hz is out of range.
 *		0 on success.
 */
int kvm_mips_set_count_hz(struct kvm_vcpu *vcpu, s64 count_hz)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	int dc;
	ktime_t now;
	u32 count;

	/* ensure the frequency is in a sensible range... */
	if (count_hz <= 0 || count_hz > NSEC_PER_SEC)
		return -EINVAL;
	/* ... and has actually changed */
	if (vcpu->arch.count_hz == count_hz)
		return 0;

	/* Safely freeze timer so we can keep it continuous */
	dc = kvm_mips_count_disabled(vcpu);
	if (dc) {
		now = kvm_mips_count_time(vcpu);
		count = kvm_read_c0_guest_count(cop0);
	} else {
		now = kvm_mips_freeze_hrtimer(vcpu, &count);
	}

	/* Update the frequency */
	vcpu->arch.count_hz = count_hz;
	vcpu->arch.count_period = div_u64((u64)NSEC_PER_SEC << 32, count_hz);
	vcpu->arch.count_dyn_bias = 0;

	/* Calculate adjusted bias so dynamic count is unchanged */
	vcpu->arch.count_bias = count - kvm_mips_ktime_to_count(vcpu, now);

	/* Update and resume hrtimer */
	if (!dc)
		kvm_mips_resume_hrtimer(vcpu, now, count);
	return 0;
}

/**
 * kvm_mips_write_compare() - Modify compare and update timer.
 * @vcpu:	Virtual CPU.
 * @compare:	New CP0_Compare value.
 * @ack:	Whether to acknowledge timer interrupt.
 *
 * Update CP0_Compare to a new value and update the timeout.
 * If @ack, atomically acknowledge any pending timer interrupt, otherwise ensure
 * any pending timer interrupt is preserved.
 */
void kvm_mips_write_compare(struct kvm_vcpu *vcpu, u32 compare, bool ack)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	int dc;
	u32 old_compare = kvm_read_c0_guest_compare(cop0);
	ktime_t now;
	u32 count;

	/* if unchanged, must just be an ack */
	if (old_compare == compare) {
		if (!ack)
			return;
		kvm_mips_callbacks->dequeue_timer_int(vcpu);
		kvm_write_c0_guest_compare(cop0, compare);
		return;
	}

	/* freeze_hrtimer() takes care of timer interrupts <= count */
	dc = kvm_mips_count_disabled(vcpu);
	if (!dc)
		now = kvm_mips_freeze_hrtimer(vcpu, &count);

	if (ack)
		kvm_mips_callbacks->dequeue_timer_int(vcpu);

	kvm_write_c0_guest_compare(cop0, compare);

	/* resume_hrtimer() takes care of timer interrupts > count */
	if (!dc)
		kvm_mips_resume_hrtimer(vcpu, now, count);
}
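/*
 * For example, the MTC0-to-CP0_Compare path in kvm_mips_emulate_CP0() below
 * calls kvm_mips_write_compare() with ack == true, so a guest write to
 * Compare acknowledges any pending timer interrupt atomically with the
 * Compare update, mirroring how a Compare write clears the timer interrupt
 * on real hardware.
 */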
/**
 * kvm_mips_count_disable() - Disable count.
 * @vcpu:	Virtual CPU.
 *
 * Disable the CP0_Count timer. A timer interrupt on or before the final stop
 * time will be handled but not after.
 *
 * Assumes CP0_Count was previously enabled but now Guest.CP0_Cause.DC or
 * count_ctl.DC has been set (count disabled).
 *
 * Returns:	The time that the timer was stopped.
 */
static ktime_t kvm_mips_count_disable(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	u32 count;
	ktime_t now;

	/* Stop hrtimer */
	hrtimer_cancel(&vcpu->arch.comparecount_timer);

	/* Set the static count from the dynamic count, handling pending TI */
	now = ktime_get();
	count = kvm_mips_read_count_running(vcpu, now);
	kvm_write_c0_guest_count(cop0, count);

	return now;
}

/**
 * kvm_mips_count_disable_cause() - Disable count using CP0_Cause.DC.
 * @vcpu:	Virtual CPU.
 *
 * Disable the CP0_Count timer and set CP0_Cause.DC. A timer interrupt on or
 * before the final stop time will be handled if the timer isn't disabled by
 * count_ctl.DC, but not after.
 *
 * Assumes CP0_Cause.DC is clear (count enabled).
 */
void kvm_mips_count_disable_cause(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;

	kvm_set_c0_guest_cause(cop0, CAUSEF_DC);
	if (!(vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC))
		kvm_mips_count_disable(vcpu);
}

/**
 * kvm_mips_count_enable_cause() - Enable count using CP0_Cause.DC.
 * @vcpu:	Virtual CPU.
 *
 * Enable the CP0_Count timer and clear CP0_Cause.DC. A timer interrupt after
 * the start time will be handled if the timer isn't disabled by count_ctl.DC,
 * potentially before even returning, so the caller should be careful with
 * ordering of CP0_Cause modifications so as not to lose it.
 *
 * Assumes CP0_Cause.DC is set (count disabled).
 */
void kvm_mips_count_enable_cause(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	u32 count;

	kvm_clear_c0_guest_cause(cop0, CAUSEF_DC);

	/*
	 * Set the dynamic count to match the static count.
	 * This starts the hrtimer if count_ctl.DC allows it.
	 * Otherwise it conveniently updates the biases.
	 */
	count = kvm_read_c0_guest_count(cop0);
	kvm_mips_write_count(vcpu, count);
}

/**
 * kvm_mips_set_count_ctl() - Update the count control KVM register.
 * @vcpu:	Virtual CPU.
 * @count_ctl:	Count control register new value.
 *
 * Set the count control KVM register. The timer is updated accordingly.
 *
 * Returns:	-EINVAL if reserved bits are set.
 *		0 on success.
 */
int kvm_mips_set_count_ctl(struct kvm_vcpu *vcpu, s64 count_ctl)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	s64 changed = count_ctl ^ vcpu->arch.count_ctl;
	s64 delta;
	ktime_t expire, now;
	u32 count, compare;

	/* Only allow defined bits to be changed */
	if (changed & ~(s64)(KVM_REG_MIPS_COUNT_CTL_DC))
		return -EINVAL;

	/* Apply new value */
	vcpu->arch.count_ctl = count_ctl;

	/* Master CP0_Count disable */
	if (changed & KVM_REG_MIPS_COUNT_CTL_DC) {
		/* Is CP0_Cause.DC already disabling CP0_Count? */
		if (kvm_read_c0_guest_cause(cop0) & CAUSEF_DC) {
			if (count_ctl & KVM_REG_MIPS_COUNT_CTL_DC)
				/* Just record the current time */
				vcpu->arch.count_resume = ktime_get();
		} else if (count_ctl & KVM_REG_MIPS_COUNT_CTL_DC) {
			/* disable timer and record current time */
			vcpu->arch.count_resume = kvm_mips_count_disable(vcpu);
		} else {
			/*
			 * Calculate timeout relative to static count at resume
			 * time (wrap 0 to 2^32).
			 */
			count = kvm_read_c0_guest_count(cop0);
			compare = kvm_read_c0_guest_compare(cop0);
			delta = (u64)(u32)(compare - count - 1) + 1;
			delta = div_u64(delta * NSEC_PER_SEC,
					vcpu->arch.count_hz);
			expire = ktime_add_ns(vcpu->arch.count_resume, delta);

			/* Handle pending interrupt */
			now = ktime_get();
			if (ktime_compare(now, expire) >= 0)
				/* Nothing should be waiting on the timeout */
				kvm_mips_callbacks->queue_timer_int(vcpu);

			/* Resume hrtimer without changing bias */
			count = kvm_mips_read_count_running(vcpu, now);
			kvm_mips_resume_hrtimer(vcpu, now, count);
		}
	}

	return 0;
}
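/*
 * Note that when count_ctl.DC is cleared above, the expiry check is made
 * against count_resume (the time recorded when the timer was stopped), so a
 * timer interrupt that would have fired while the master disable was set is
 * still delivered once the timer is re-enabled.
 */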
/**
 * kvm_mips_set_count_resume() - Update the count resume KVM register.
 * @vcpu:		Virtual CPU.
 * @count_resume:	Count resume register new value.
 *
 * Set the count resume KVM register.
 *
 * Returns:	-EINVAL if out of valid range (0..now).
 *		0 on success.
 */
int kvm_mips_set_count_resume(struct kvm_vcpu *vcpu, s64 count_resume)
{
	/*
	 * It doesn't make sense for the resume time to be in the future, as it
	 * would be possible for the next interrupt to be more than a full
	 * period in the future.
	 */
	if (count_resume < 0 || count_resume > ktime_to_ns(ktime_get()))
		return -EINVAL;

	vcpu->arch.count_resume = ns_to_ktime(count_resume);
	return 0;
}

/**
 * kvm_mips_count_timeout() - Push timer forward on timeout.
 * @vcpu:	Virtual CPU.
 *
 * Handle an hrtimer event by pushing the hrtimer forward a period.
 *
 * Returns:	The hrtimer_restart value to return to the hrtimer subsystem.
 */
enum hrtimer_restart kvm_mips_count_timeout(struct kvm_vcpu *vcpu)
{
	/* Add the Count period to the current expiry time */
	hrtimer_add_expires_ns(&vcpu->arch.comparecount_timer,
			       vcpu->arch.count_period);
	return HRTIMER_RESTART;
}

enum emulation_result kvm_mips_emul_eret(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	enum emulation_result er = EMULATE_DONE;

	if (kvm_read_c0_guest_status(cop0) & ST0_ERL) {
		kvm_clear_c0_guest_status(cop0, ST0_ERL);
		vcpu->arch.pc = kvm_read_c0_guest_errorepc(cop0);
	} else if (kvm_read_c0_guest_status(cop0) & ST0_EXL) {
		kvm_debug("[%#lx] ERET to %#lx\n", vcpu->arch.pc,
			  kvm_read_c0_guest_epc(cop0));
		kvm_clear_c0_guest_status(cop0, ST0_EXL);
		vcpu->arch.pc = kvm_read_c0_guest_epc(cop0);

	} else {
		kvm_err("[%#lx] ERET when MIPS_SR_EXL|MIPS_SR_ERL == 0\n",
			vcpu->arch.pc);
		er = EMULATE_FAIL;
	}

	return er;
}

enum emulation_result kvm_mips_emul_wait(struct kvm_vcpu *vcpu)
{
	kvm_debug("[%#lx] !!!WAIT!!! (%#lx)\n", vcpu->arch.pc,
		  vcpu->arch.pending_exceptions);

	++vcpu->stat.wait_exits;
	trace_kvm_exit(vcpu, KVM_TRACE_EXIT_WAIT);
	if (!vcpu->arch.pending_exceptions) {
		vcpu->arch.wait = 1;
		kvm_vcpu_block(vcpu);

		/*
		 * If we are runnable, then definitely go off to user space to
		 * check if any I/O interrupts are pending.
		 */
		if (kvm_check_request(KVM_REQ_UNHALT, vcpu)) {
			clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
			vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
		}
	}

	return EMULATE_DONE;
}

/*
 * XXXKYMA: Linux doesn't seem to use TLBR, return EMULATE_FAIL for now so that
 * we can catch this, if things ever change
 */
enum emulation_result kvm_mips_emul_tlbr(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	unsigned long pc = vcpu->arch.pc;

	kvm_err("[%#lx] COP0_TLBR [%ld]\n", pc, kvm_read_c0_guest_index(cop0));
	return EMULATE_FAIL;
}

/**
 * kvm_mips_invalidate_guest_tlb() - Indicates a change in guest MMU map.
 * @vcpu:	VCPU with changed mappings.
 * @tlb:	TLB entry being removed.
 *
 * This is called to indicate a single change in guest MMU mappings, so that we
 * can arrange TLB flushes on this and other CPUs.
 */
static void kvm_mips_invalidate_guest_tlb(struct kvm_vcpu *vcpu,
					  struct kvm_mips_tlb *tlb)
{
	struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;
	struct mm_struct *user_mm = &vcpu->arch.guest_user_mm;
	int cpu, i;
	bool user;

	/* No need to flush for entries which are already invalid */
	if (!((tlb->tlb_lo[0] | tlb->tlb_lo[1]) & ENTRYLO_V))
		return;
	/* Don't touch host kernel page tables or TLB mappings */
	if ((unsigned long)tlb->tlb_hi > 0x7fffffff)
		return;
	/* User address space doesn't need flushing for KSeg2/3 changes */
	user = tlb->tlb_hi < KVM_GUEST_KSEG0;

	preempt_disable();

	/* Invalidate page table entries */
	kvm_trap_emul_invalidate_gva(vcpu, tlb->tlb_hi & VPN2_MASK, user);

	/*
	 * Probe the shadow host TLB for the entry being overwritten, if one
	 * matches, invalidate it
	 */
	kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi, user, true);

	/* Invalidate the whole ASID on other CPUs */
	cpu = smp_processor_id();
	for_each_possible_cpu(i) {
		if (i == cpu)
			continue;
		if (user)
			cpu_context(i, user_mm) = 0;
		cpu_context(i, kern_mm) = 0;
	}

	preempt_enable();
}

/* Write Guest TLB Entry @ Index */
enum emulation_result kvm_mips_emul_tlbwi(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	int index = kvm_read_c0_guest_index(cop0);
	struct kvm_mips_tlb *tlb = NULL;
	unsigned long pc = vcpu->arch.pc;

	if (index < 0 || index >= KVM_MIPS_GUEST_TLB_SIZE) {
		kvm_debug("%s: illegal index: %d\n", __func__, index);
		kvm_debug("[%#lx] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n",
			  pc, index, kvm_read_c0_guest_entryhi(cop0),
			  kvm_read_c0_guest_entrylo0(cop0),
			  kvm_read_c0_guest_entrylo1(cop0),
			  kvm_read_c0_guest_pagemask(cop0));
		index = (index & ~0x80000000) % KVM_MIPS_GUEST_TLB_SIZE;
	}

	tlb = &vcpu->arch.guest_tlb[index];

	kvm_mips_invalidate_guest_tlb(vcpu, tlb);

	tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0);
	tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0);
	tlb->tlb_lo[0] = kvm_read_c0_guest_entrylo0(cop0);
	tlb->tlb_lo[1] = kvm_read_c0_guest_entrylo1(cop0);

	kvm_debug("[%#lx] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n",
		  pc, index, kvm_read_c0_guest_entryhi(cop0),
		  kvm_read_c0_guest_entrylo0(cop0),
		  kvm_read_c0_guest_entrylo1(cop0),
		  kvm_read_c0_guest_pagemask(cop0));

	return EMULATE_DONE;
}

/* Write Guest TLB Entry @ Random Index */
enum emulation_result kvm_mips_emul_tlbwr(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_mips_tlb *tlb = NULL;
	unsigned long pc = vcpu->arch.pc;
	int index;

	get_random_bytes(&index, sizeof(index));
	index &= (KVM_MIPS_GUEST_TLB_SIZE - 1);

	tlb = &vcpu->arch.guest_tlb[index];

	kvm_mips_invalidate_guest_tlb(vcpu, tlb);

	tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0);
	tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0);
	tlb->tlb_lo[0] = kvm_read_c0_guest_entrylo0(cop0);
	tlb->tlb_lo[1] = kvm_read_c0_guest_entrylo1(cop0);

	kvm_debug("[%#lx] COP0_TLBWR[%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx)\n",
		  pc, index, kvm_read_c0_guest_entryhi(cop0),
		  kvm_read_c0_guest_entrylo0(cop0),
		  kvm_read_c0_guest_entrylo1(cop0));

	return EMULATE_DONE;
}
enum emulation_result kvm_mips_emul_tlbp(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	long entryhi = kvm_read_c0_guest_entryhi(cop0);
	unsigned long pc = vcpu->arch.pc;
	int index = -1;

	index = kvm_mips_guest_tlb_lookup(vcpu, entryhi);

	kvm_write_c0_guest_index(cop0, index);

	kvm_debug("[%#lx] COP0_TLBP (entryhi: %#lx), index: %d\n", pc, entryhi,
		  index);

	return EMULATE_DONE;
}

/**
 * kvm_mips_config1_wrmask() - Find mask of writable bits in guest Config1
 * @vcpu:	Virtual CPU.
 *
 * Finds the mask of bits which are writable in the guest's Config1 CP0
 * register, by userland (currently read-only to the guest).
 */
unsigned int kvm_mips_config1_wrmask(struct kvm_vcpu *vcpu)
{
	unsigned int mask = 0;

	/* Permit FPU to be present if FPU is supported */
	if (kvm_mips_guest_can_have_fpu(&vcpu->arch))
		mask |= MIPS_CONF1_FP;

	return mask;
}

/**
 * kvm_mips_config3_wrmask() - Find mask of writable bits in guest Config3
 * @vcpu:	Virtual CPU.
 *
 * Finds the mask of bits which are writable in the guest's Config3 CP0
 * register, by userland (currently read-only to the guest).
 */
unsigned int kvm_mips_config3_wrmask(struct kvm_vcpu *vcpu)
{
	/* Config4 and ULRI are optional */
	unsigned int mask = MIPS_CONF_M | MIPS_CONF3_ULRI;

	/* Permit MSA to be present if MSA is supported */
	if (kvm_mips_guest_can_have_msa(&vcpu->arch))
		mask |= MIPS_CONF3_MSA;

	return mask;
}

/**
 * kvm_mips_config4_wrmask() - Find mask of writable bits in guest Config4
 * @vcpu:	Virtual CPU.
 *
 * Finds the mask of bits which are writable in the guest's Config4 CP0
 * register, by userland (currently read-only to the guest).
 */
unsigned int kvm_mips_config4_wrmask(struct kvm_vcpu *vcpu)
{
	/* Config5 is optional */
	unsigned int mask = MIPS_CONF_M;

	/* KScrExist */
	mask |= 0xfc << MIPS_CONF4_KSCREXIST_SHIFT;

	return mask;
}

/**
 * kvm_mips_config5_wrmask() - Find mask of writable bits in guest Config5
 * @vcpu:	Virtual CPU.
 *
 * Finds the mask of bits which are writable in the guest's Config5 CP0
 * register, by the guest itself.
 */
unsigned int kvm_mips_config5_wrmask(struct kvm_vcpu *vcpu)
{
	unsigned int mask = 0;

	/* Permit MSAEn changes if MSA supported and enabled */
	if (kvm_mips_guest_has_msa(&vcpu->arch))
		mask |= MIPS_CONF5_MSAEN;

	/*
	 * Permit guest FPU mode changes if FPU is enabled and the relevant
	 * feature exists according to FIR register.
	 */
	if (kvm_mips_guest_has_fpu(&vcpu->arch)) {
		if (cpu_has_fre)
			mask |= MIPS_CONF5_FRE;
		/* We don't support UFR or UFE */
	}

	return mask;
}

enum emulation_result kvm_mips_emulate_CP0(union mips_instruction inst,
					   u32 *opc, u32 cause,
					   struct kvm_run *run,
					   struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;
	enum emulation_result er = EMULATE_DONE;
	u32 rt, rd, sel;
	unsigned long curr_pc;
	int cpu, i;

	/*
	 * Update PC and hold onto current PC in case there is
	 * an error and we want to rollback the PC
	 */
	curr_pc = vcpu->arch.pc;
	er = update_pc(vcpu, cause);
	if (er == EMULATE_FAIL)
		return er;

	if (inst.co_format.co) {
		switch (inst.co_format.func) {
		case tlbr_op:	/* Read indexed TLB entry */
			er = kvm_mips_emul_tlbr(vcpu);
			break;
		case tlbwi_op:	/* Write indexed */
			er = kvm_mips_emul_tlbwi(vcpu);
			break;
		case tlbwr_op:	/* Write random */
			er = kvm_mips_emul_tlbwr(vcpu);
			break;
		case tlbp_op:	/* TLB Probe */
			er = kvm_mips_emul_tlbp(vcpu);
			break;
		case rfe_op:
			kvm_err("!!!COP0_RFE!!!\n");
			break;
		case eret_op:
			er = kvm_mips_emul_eret(vcpu);
			goto dont_update_pc;
		case wait_op:
			er = kvm_mips_emul_wait(vcpu);
			break;
		}
	} else {
		rt = inst.c0r_format.rt;
		rd = inst.c0r_format.rd;
		sel = inst.c0r_format.sel;

		switch (inst.c0r_format.rs) {
		case mfc_op:
#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
			cop0->stat[rd][sel]++;
#endif
			/* Get reg */
			if ((rd == MIPS_CP0_COUNT) && (sel == 0)) {
				vcpu->arch.gprs[rt] =
					(s32)kvm_mips_read_count(vcpu);
			} else if ((rd == MIPS_CP0_ERRCTL) && (sel == 0)) {
				vcpu->arch.gprs[rt] = 0x0;
#ifdef CONFIG_KVM_MIPS_DYN_TRANS
				kvm_mips_trans_mfc0(inst, opc, vcpu);
#endif
			} else {
				vcpu->arch.gprs[rt] = (s32)cop0->reg[rd][sel];

#ifdef CONFIG_KVM_MIPS_DYN_TRANS
				kvm_mips_trans_mfc0(inst, opc, vcpu);
#endif
			}

			trace_kvm_hwr(vcpu, KVM_TRACE_MFC0,
				      KVM_TRACE_COP0(rd, sel),
				      vcpu->arch.gprs[rt]);
			break;

		case dmfc_op:
			vcpu->arch.gprs[rt] = cop0->reg[rd][sel];

			trace_kvm_hwr(vcpu, KVM_TRACE_DMFC0,
				      KVM_TRACE_COP0(rd, sel),
				      vcpu->arch.gprs[rt]);
			break;

		case mtc_op:
#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
			cop0->stat[rd][sel]++;
#endif
			trace_kvm_hwr(vcpu, KVM_TRACE_MTC0,
				      KVM_TRACE_COP0(rd, sel),
				      vcpu->arch.gprs[rt]);

			if ((rd == MIPS_CP0_TLB_INDEX)
			    && (vcpu->arch.gprs[rt] >=
				KVM_MIPS_GUEST_TLB_SIZE)) {
				kvm_err("Invalid TLB Index: %ld",
					vcpu->arch.gprs[rt]);
				er = EMULATE_FAIL;
				break;
			}
			if ((rd == MIPS_CP0_PRID) && (sel == 1)) {
				/*
				 * Preserve core number, and keep the exception
				 * base in guest KSeg0.
				 */
				kvm_change_c0_guest_ebase(cop0, 0x1ffff000,
							  vcpu->arch.gprs[rt]);
			} else if (rd == MIPS_CP0_TLB_HI && sel == 0) {
				u32 nasid =
					vcpu->arch.gprs[rt] & KVM_ENTRYHI_ASID;
				if (((kvm_read_c0_guest_entryhi(cop0) &
				      KVM_ENTRYHI_ASID) != nasid)) {
					trace_kvm_asid_change(vcpu,
						kvm_read_c0_guest_entryhi(cop0)
							& KVM_ENTRYHI_ASID,
						nasid);

					/*
					 * Flush entries from the GVA page
					 * tables.
					 * Guest user page table will get
					 * flushed lazily on re-entry to guest
					 * user if the guest ASID actually
					 * changes.
					 */
					kvm_mips_flush_gva_pt(kern_mm->pgd,
							      KMF_KERN);

					/*
					 * Regenerate/invalidate kernel MMU
					 * context.
					 * The user MMU context will be
					 * regenerated lazily on re-entry to
					 * guest user if the guest ASID actually
					 * changes.
					 */
					preempt_disable();
					cpu = smp_processor_id();
					get_new_mmu_context(kern_mm, cpu);
					for_each_possible_cpu(i)
						if (i != cpu)
							cpu_context(i, kern_mm) = 0;
					preempt_enable();
				}
				kvm_write_c0_guest_entryhi(cop0,
							   vcpu->arch.gprs[rt]);
			}
			/* Are we writing to COUNT */
			else if ((rd == MIPS_CP0_COUNT) && (sel == 0)) {
				kvm_mips_write_count(vcpu, vcpu->arch.gprs[rt]);
				goto done;
			} else if ((rd == MIPS_CP0_COMPARE) && (sel == 0)) {
				/* If we are writing to COMPARE */
				/* Clear pending timer interrupt, if any */
				kvm_mips_write_compare(vcpu,
						       vcpu->arch.gprs[rt],
						       true);
			} else if ((rd == MIPS_CP0_STATUS) && (sel == 0)) {
				unsigned int old_val, val, change;

				old_val = kvm_read_c0_guest_status(cop0);
				val = vcpu->arch.gprs[rt];
				change = val ^ old_val;

				/* Make sure that the NMI bit is never set */
				val &= ~ST0_NMI;

				/*
				 * Don't allow CU1 or FR to be set unless FPU
				 * capability enabled and exists in guest
				 * configuration.
				 */
				if (!kvm_mips_guest_has_fpu(&vcpu->arch))
					val &= ~(ST0_CU1 | ST0_FR);

				/*
				 * Also don't allow FR to be set if host doesn't
				 * support it.
				 */
				if (!(current_cpu_data.fpu_id & MIPS_FPIR_F64))
					val &= ~ST0_FR;


				/* Handle changes in FPU mode */
				preempt_disable();

				/*
				 * FPU and Vector register state is made
				 * UNPREDICTABLE by a change of FR, so don't
				 * even bother saving it.
				 */
				if (change & ST0_FR)
					kvm_drop_fpu(vcpu);

				/*
				 * If MSA state is already live, it is undefined
				 * how it interacts with FR=0 FPU state, and we
				 * don't want to hit reserved instruction
				 * exceptions trying to save the MSA state later
				 * when CU=1 && FR=1, so play it safe and save
				 * it first.
				 */
				if (change & ST0_CU1 && !(val & ST0_FR) &&
				    vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA)
					kvm_lose_fpu(vcpu);

				/*
				 * Propagate CU1 (FPU enable) changes
				 * immediately if the FPU context is already
				 * loaded. When disabling we leave the context
				 * loaded so it can be quickly enabled again in
				 * the near future.
				 */
				if (change & ST0_CU1 &&
				    vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU)
					change_c0_status(ST0_CU1, val);

				preempt_enable();

				kvm_write_c0_guest_status(cop0, val);

#ifdef CONFIG_KVM_MIPS_DYN_TRANS
				/*
				 * If FPU present, we need CU1/FR bits to take
				 * effect fairly soon.
				 */
				if (!kvm_mips_guest_has_fpu(&vcpu->arch))
					kvm_mips_trans_mtc0(inst, opc, vcpu);
#endif
			} else if ((rd == MIPS_CP0_CONFIG) && (sel == 5)) {
				unsigned int old_val, val, change, wrmask;

				old_val = kvm_read_c0_guest_config5(cop0);
				val = vcpu->arch.gprs[rt];

				/* Only a few bits are writable in Config5 */
				wrmask = kvm_mips_config5_wrmask(vcpu);
				change = (val ^ old_val) & wrmask;
				val = old_val ^ change;


				/* Handle changes in FPU/MSA modes */
				preempt_disable();

				/*
				 * Propagate FRE changes immediately if the FPU
				 * context is already loaded.
				 */
				if (change & MIPS_CONF5_FRE &&
				    vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU)
					change_c0_config5(MIPS_CONF5_FRE, val);

				/*
				 * Propagate MSAEn changes immediately if the
				 * MSA context is already loaded. When disabling
				 * we leave the context loaded so it can be
				 * quickly enabled again in the near future.
				 */
				if (change & MIPS_CONF5_MSAEN &&
				    vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA)
					change_c0_config5(MIPS_CONF5_MSAEN,
							  val);

				preempt_enable();

				kvm_write_c0_guest_config5(cop0, val);
			} else if ((rd == MIPS_CP0_CAUSE) && (sel == 0)) {
				u32 old_cause, new_cause;

				old_cause = kvm_read_c0_guest_cause(cop0);
				new_cause = vcpu->arch.gprs[rt];
				/* Update R/W bits */
				kvm_change_c0_guest_cause(cop0, 0x08800300,
							  new_cause);
				/* DC bit enabling/disabling timer? */
				if ((old_cause ^ new_cause) & CAUSEF_DC) {
					if (new_cause & CAUSEF_DC)
						kvm_mips_count_disable_cause(vcpu);
					else
						kvm_mips_count_enable_cause(vcpu);
				}
			} else if ((rd == MIPS_CP0_HWRENA) && (sel == 0)) {
				u32 mask = MIPS_HWRENA_CPUNUM |
					   MIPS_HWRENA_SYNCISTEP |
					   MIPS_HWRENA_CC |
					   MIPS_HWRENA_CCRES;

				if (kvm_read_c0_guest_config3(cop0) &
				    MIPS_CONF3_ULRI)
					mask |= MIPS_HWRENA_ULR;
				cop0->reg[rd][sel] = vcpu->arch.gprs[rt] & mask;
			} else {
				cop0->reg[rd][sel] = vcpu->arch.gprs[rt];
#ifdef CONFIG_KVM_MIPS_DYN_TRANS
				kvm_mips_trans_mtc0(inst, opc, vcpu);
#endif
			}
			break;

		case dmtc_op:
			kvm_err("!!!!!!![%#lx]dmtc_op: rt: %d, rd: %d, sel: %d!!!!!!\n",
				vcpu->arch.pc, rt, rd, sel);
			trace_kvm_hwr(vcpu, KVM_TRACE_DMTC0,
				      KVM_TRACE_COP0(rd, sel),
				      vcpu->arch.gprs[rt]);
			er = EMULATE_FAIL;
			break;

		case mfmc0_op:
#ifdef KVM_MIPS_DEBUG_COP0_COUNTERS
			cop0->stat[MIPS_CP0_STATUS][0]++;
#endif
			if (rt != 0)
				vcpu->arch.gprs[rt] =
					kvm_read_c0_guest_status(cop0);
			/* EI */
			if (inst.mfmc0_format.sc) {
				kvm_debug("[%#lx] mfmc0_op: EI\n",
					  vcpu->arch.pc);
				kvm_set_c0_guest_status(cop0, ST0_IE);
			} else {
				kvm_debug("[%#lx] mfmc0_op: DI\n",
					  vcpu->arch.pc);
				kvm_clear_c0_guest_status(cop0, ST0_IE);
			}

			break;

		case wrpgpr_op:
			{
				u32 css = cop0->reg[MIPS_CP0_STATUS][2] & 0xf;
				u32 pss =
					(cop0->reg[MIPS_CP0_STATUS][2] >> 6) & 0xf;
				/*
				 * We don't support any shadow register sets, so
				 * SRSCtl[PSS] == SRSCtl[CSS] = 0
				 */
				if (css || pss) {
					er = EMULATE_FAIL;
					break;
				}
				kvm_debug("WRPGPR[%d][%d] = %#lx\n", pss, rd,
					  vcpu->arch.gprs[rt]);
				vcpu->arch.gprs[rd] = vcpu->arch.gprs[rt];
			}
			break;
		default:
			kvm_err("[%#lx]MachEmulateCP0: unsupported COP0, copz: 0x%x\n",
				vcpu->arch.pc, inst.c0r_format.rs);
			er = EMULATE_FAIL;
			break;
		}
	}

done:
	/* Rollback PC only if emulation was unsuccessful */
	if (er == EMULATE_FAIL)
		vcpu->arch.pc = curr_pc;

dont_update_pc:
	/*
	 * This is for special instructions whose emulation
	 * updates the PC, so do not overwrite the PC under
	 * any circumstances
	 */

	return er;
}

enum emulation_result kvm_mips_emulate_store(union mips_instruction inst,
					     u32 cause,
					     struct kvm_run *run,
					     struct kvm_vcpu *vcpu)
{
	enum emulation_result er = EMULATE_DO_MMIO;
	u32 rt;
	u32 bytes;
	void *data = run->mmio.data;
	unsigned long curr_pc;

	/*
	 * Update PC and hold onto current PC in case there is
	 * an error and we want to rollback the PC
	 */
	curr_pc = vcpu->arch.pc;
	er = update_pc(vcpu, cause);
	if (er == EMULATE_FAIL)
		return er;

	rt = inst.i_format.rt;

	switch (inst.i_format.opcode) {
	case sb_op:
		bytes = 1;
		if (bytes > sizeof(run->mmio.data)) {
			kvm_err("%s: bad MMIO length: %d\n", __func__,
				run->mmio.len);
		}
		run->mmio.phys_addr =
			kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
						       host_cp0_badvaddr);
		if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
			er = EMULATE_FAIL;
			break;
		}
		run->mmio.len = bytes;
		run->mmio.is_write = 1;
		vcpu->mmio_needed = 1;
		vcpu->mmio_is_write = 1;
		*(u8 *) data = vcpu->arch.gprs[rt];
		kvm_debug("OP_SB: eaddr: %#lx, gpr: %#lx, data: %#x\n",
			  vcpu->arch.host_cp0_badvaddr, vcpu->arch.gprs[rt],
			  *(u8 *) data);

		break;

	case sw_op:
		bytes = 4;
		if (bytes > sizeof(run->mmio.data)) {
			kvm_err("%s: bad MMIO length: %d\n", __func__,
				run->mmio.len);
		}
		run->mmio.phys_addr =
			kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
						       host_cp0_badvaddr);
		if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
			er = EMULATE_FAIL;
			break;
		}

		run->mmio.len = bytes;
		run->mmio.is_write = 1;
		vcpu->mmio_needed = 1;
		vcpu->mmio_is_write = 1;
		*(u32 *) data = vcpu->arch.gprs[rt];

		kvm_debug("[%#lx] OP_SW: eaddr: %#lx, gpr: %#lx, data: %#x\n",
			  vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
			  vcpu->arch.gprs[rt], *(u32 *) data);
		break;

	case sh_op:
		bytes = 2;
		if (bytes > sizeof(run->mmio.data)) {
			kvm_err("%s: bad MMIO length: %d\n", __func__,
				run->mmio.len);
		}
		run->mmio.phys_addr =
			kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
						       host_cp0_badvaddr);
		if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
			er = EMULATE_FAIL;
			break;
		}

		run->mmio.len = bytes;
		run->mmio.is_write = 1;
		vcpu->mmio_needed = 1;
		vcpu->mmio_is_write = 1;
		*(u16 *) data = vcpu->arch.gprs[rt];

		kvm_debug("[%#lx] OP_SH: eaddr: %#lx, gpr: %#lx, data: %#x\n",
			  vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
			  vcpu->arch.gprs[rt], *(u32 *) data);
		break;

	default:
		kvm_err("Store not yet supported (inst=0x%08x)\n",
			inst.word);
		er = EMULATE_FAIL;
		break;
	}

	/* Rollback PC if emulation was unsuccessful */
	if (er == EMULATE_FAIL)
		vcpu->arch.pc = curr_pc;

	return er;
}

enum emulation_result kvm_mips_emulate_load(union mips_instruction inst,
					    u32 cause, struct kvm_run *run,
					    struct kvm_vcpu *vcpu)
{
	enum emulation_result er = EMULATE_DO_MMIO;
	unsigned long curr_pc;
	u32 op, rt;
	u32 bytes;

	rt = inst.i_format.rt;
	op = inst.i_format.opcode;

	/*
	 * Find the resume PC now while we have safe and easy access to the
	 * prior branch instruction, and save it for
	 * kvm_mips_complete_mmio_load() to restore later.
	 */
	curr_pc = vcpu->arch.pc;
	er = update_pc(vcpu, cause);
	if (er == EMULATE_FAIL)
		return er;
	vcpu->arch.io_pc = vcpu->arch.pc;
	vcpu->arch.pc = curr_pc;

	vcpu->arch.io_gpr = rt;

	switch (op) {
	case lw_op:
		bytes = 4;
		if (bytes > sizeof(run->mmio.data)) {
			kvm_err("%s: bad MMIO length: %d\n", __func__,
				run->mmio.len);
			er = EMULATE_FAIL;
			break;
		}
		run->mmio.phys_addr =
			kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
						       host_cp0_badvaddr);
		if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
			er = EMULATE_FAIL;
			break;
		}

		run->mmio.len = bytes;
		run->mmio.is_write = 0;
		vcpu->mmio_needed = 1;
		vcpu->mmio_is_write = 0;
		break;

	case lh_op:
	case lhu_op:
		bytes = 2;
		if (bytes > sizeof(run->mmio.data)) {
			kvm_err("%s: bad MMIO length: %d\n", __func__,
				run->mmio.len);
			er = EMULATE_FAIL;
			break;
		}
		run->mmio.phys_addr =
			kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
						       host_cp0_badvaddr);
		if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
			er = EMULATE_FAIL;
			break;
		}

		run->mmio.len = bytes;
		run->mmio.is_write = 0;
		vcpu->mmio_needed = 1;
		vcpu->mmio_is_write = 0;

		if (op == lh_op)
			vcpu->mmio_needed = 2;
		else
			vcpu->mmio_needed = 1;

		break;

	case lbu_op:
	case lb_op:
		bytes = 1;
		if (bytes > sizeof(run->mmio.data)) {
			kvm_err("%s: bad MMIO length: %d\n", __func__,
				run->mmio.len);
			er = EMULATE_FAIL;
			break;
		}
		run->mmio.phys_addr =
			kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
						       host_cp0_badvaddr);
		if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
			er = EMULATE_FAIL;
			break;
		}

		run->mmio.len = bytes;
		run->mmio.is_write = 0;
		vcpu->mmio_is_write = 0;

		if (op == lb_op)
			vcpu->mmio_needed = 2;
		else
			vcpu->mmio_needed = 1;

		break;

	default:
		kvm_err("Load not yet supported (inst=0x%08x)\n",
			inst.word);
		er = EMULATE_FAIL;
		break;
	}

	return er;
}
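/*
 * For the loads above, vcpu->mmio_needed doubles as a signedness flag: the
 * signed lb/lh cases set it to 2 while the unsigned (and word) cases use 1,
 * which the MMIO completion path (kvm_mips_complete_mmio_load(), referenced
 * above) can use to decide how to extend the data returned by userspace into
 * the destination GPR recorded in vcpu->arch.io_gpr.
 */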
static enum emulation_result kvm_mips_guest_cache_op(int (*fn)(unsigned long),
						     unsigned long curr_pc,
						     unsigned long addr,
						     struct kvm_run *run,
						     struct kvm_vcpu *vcpu,
						     u32 cause)
{
	int err;

	for (;;) {
		/* Carefully attempt the cache operation */
		kvm_trap_emul_gva_lockless_begin(vcpu);
		err = fn(addr);
		kvm_trap_emul_gva_lockless_end(vcpu);

		if (likely(!err))
			return EMULATE_DONE;

		/*
		 * Try to handle the fault and retry, maybe we just raced with a
		 * GVA invalidation.
		 */
		switch (kvm_trap_emul_gva_fault(vcpu, addr, false)) {
		case KVM_MIPS_GVA:
		case KVM_MIPS_GPA:
			/* bad virtual or physical address */
			return EMULATE_FAIL;
		case KVM_MIPS_TLB:
			/* no matching guest TLB */
			vcpu->arch.host_cp0_badvaddr = addr;
			vcpu->arch.pc = curr_pc;
			kvm_mips_emulate_tlbmiss_ld(cause, NULL, run, vcpu);
			return EMULATE_EXCEPT;
		case KVM_MIPS_TLBINV:
			/* invalid matching guest TLB */
			vcpu->arch.host_cp0_badvaddr = addr;
			vcpu->arch.pc = curr_pc;
			kvm_mips_emulate_tlbinv_ld(cause, NULL, run, vcpu);
			return EMULATE_EXCEPT;
		default:
			break;
		};
	}
}

enum emulation_result kvm_mips_emulate_cache(union mips_instruction inst,
					     u32 *opc, u32 cause,
					     struct kvm_run *run,
					     struct kvm_vcpu *vcpu)
{
	enum emulation_result er = EMULATE_DONE;
	u32 cache, op_inst, op, base;
	s16 offset;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	unsigned long va;
	unsigned long curr_pc;

	/*
	 * Update PC and hold onto current PC in case there is
	 * an error and we want to rollback the PC
	 */
	curr_pc = vcpu->arch.pc;
	er = update_pc(vcpu, cause);
	if (er == EMULATE_FAIL)
		return er;

	base = inst.i_format.rs;
	op_inst = inst.i_format.rt;
	if (cpu_has_mips_r6)
		offset = inst.spec3_format.simmediate;
	else
		offset = inst.i_format.simmediate;
	cache = op_inst & CacheOp_Cache;
	op = op_inst & CacheOp_Op;

	va = arch->gprs[base] + offset;

	kvm_debug("CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
		  cache, op, base, arch->gprs[base], offset);

	/*
	 * Treat INDEX_INV as a nop, basically issued by Linux on startup to
	 * invalidate the caches entirely by stepping through all the
	 * ways/indexes
	 */
	if (op == Index_Writeback_Inv) {
		kvm_debug("@ %#lx/%#lx CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
			  vcpu->arch.pc, vcpu->arch.gprs[31], cache, op, base,
			  arch->gprs[base], offset);

		if (cache == Cache_D)
			r4k_blast_dcache();
		else if (cache == Cache_I)
			r4k_blast_icache();
		else {
			kvm_err("%s: unsupported CACHE INDEX operation\n",
				__func__);
			return EMULATE_FAIL;
		}

#ifdef CONFIG_KVM_MIPS_DYN_TRANS
		kvm_mips_trans_cache_index(inst, opc, vcpu);
#endif
		goto done;
	}

	/* XXXKYMA: Only a subset of cache ops are supported, used by Linux */
	if (op_inst == Hit_Writeback_Inv_D || op_inst == Hit_Invalidate_D) {
		/*
		 * Perform the dcache part of icache synchronisation on the
		 * guest's behalf.
		 */
		er = kvm_mips_guest_cache_op(protected_writeback_dcache_line,
					     curr_pc, va, run, vcpu, cause);
		if (er != EMULATE_DONE)
			goto done;
#ifdef CONFIG_KVM_MIPS_DYN_TRANS
		/*
		 * Replace the CACHE instruction, with a SYNCI, not the same,
		 * but avoids a trap
		 */
		kvm_mips_trans_cache_va(inst, opc, vcpu);
#endif
	} else if (op_inst == Hit_Invalidate_I) {
		/* Perform the icache synchronisation on the guest's behalf */
		er = kvm_mips_guest_cache_op(protected_writeback_dcache_line,
					     curr_pc, va, run, vcpu, cause);
		if (er != EMULATE_DONE)
			goto done;
		er = kvm_mips_guest_cache_op(protected_flush_icache_line,
					     curr_pc, va, run, vcpu, cause);
		if (er != EMULATE_DONE)
			goto done;

#ifdef CONFIG_KVM_MIPS_DYN_TRANS
		/* Replace the CACHE instruction, with a SYNCI */
		kvm_mips_trans_cache_va(inst, opc, vcpu);
#endif
	} else {
		kvm_err("NO-OP CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
			cache, op, base, arch->gprs[base], offset);
		er = EMULATE_FAIL;
	}

done:
	/* Rollback PC only if emulation was unsuccessful */
	if (er == EMULATE_FAIL)
		vcpu->arch.pc = curr_pc;
	/* Guest exception needs guest to resume */
	if (er == EMULATE_EXCEPT)
		er = EMULATE_DONE;

	return er;
}

enum emulation_result kvm_mips_emulate_inst(u32 cause, u32 *opc,
					    struct kvm_run *run,
					    struct kvm_vcpu *vcpu)
{
	union mips_instruction inst;
	enum emulation_result er = EMULATE_DONE;
	int err;

	/* Fetch the instruction. */
	if (cause & CAUSEF_BD)
		opc += 1;
	err = kvm_get_badinstr(opc, vcpu, &inst.word);
	if (err)
		return EMULATE_FAIL;

	switch (inst.r_format.opcode) {
	case cop0_op:
		er = kvm_mips_emulate_CP0(inst, opc, cause, run, vcpu);
		break;
	case sb_op:
	case sh_op:
	case sw_op:
		er = kvm_mips_emulate_store(inst, cause, run, vcpu);
		break;
	case lb_op:
	case lbu_op:
	case lhu_op:
	case lh_op:
	case lw_op:
		er = kvm_mips_emulate_load(inst, cause, run, vcpu);
		break;

#ifndef CONFIG_CPU_MIPSR6
	case cache_op:
		++vcpu->stat.cache_exits;
		trace_kvm_exit(vcpu, KVM_TRACE_EXIT_CACHE);
		er = kvm_mips_emulate_cache(inst, opc, cause, run, vcpu);
		break;
#else
	case spec3_op:
		switch (inst.spec3_format.func) {
		case cache6_op:
			++vcpu->stat.cache_exits;
			trace_kvm_exit(vcpu, KVM_TRACE_EXIT_CACHE);
			er = kvm_mips_emulate_cache(inst, opc, cause, run,
						    vcpu);
			break;
		default:
			goto unknown;
		};
		break;
	unknown:
#endif

	default:
		kvm_err("Instruction emulation not supported (%p/%#x)\n", opc,
			inst.word);
		kvm_arch_vcpu_dump_regs(vcpu);
		er = EMULATE_FAIL;
		break;
	}

	return er;
}
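/*
 * For the guest exception deliveries below: the vector base returned by
 * kvm_mips_guest_exception_base() is either the bootstrap vector region at
 * KSEG1 0x1fc00200 (when Guest.CP0_Status.BEV is set) or Guest.CP0_EBase;
 * general exceptions are then delivered at base + 0x180, while TLB refills
 * taken with EXL clear use base + 0x0.
 */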
/**
 * kvm_mips_guest_exception_base() - Find guest exception vector base address.
 *
 * Returns:	The base address of the current guest exception vector, taking
 *		both Guest.CP0_Status.BEV and Guest.CP0_EBase into account.
 */
long kvm_mips_guest_exception_base(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;

	if (kvm_read_c0_guest_status(cop0) & ST0_BEV)
		return KVM_GUEST_CKSEG1ADDR(0x1fc00200);
	else
		return kvm_read_c0_guest_ebase(cop0) & MIPS_EBASE_BASE;
}

enum emulation_result kvm_mips_emulate_syscall(u32 cause,
					       u32 *opc,
					       struct kvm_run *run,
					       struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	enum emulation_result er = EMULATE_DONE;

	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
		/* save old pc */
		kvm_write_c0_guest_epc(cop0, arch->pc);
		kvm_set_c0_guest_status(cop0, ST0_EXL);

		if (cause & CAUSEF_BD)
			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
		else
			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

		kvm_debug("Delivering SYSCALL @ pc %#lx\n", arch->pc);

		kvm_change_c0_guest_cause(cop0, (0xff),
					  (EXCCODE_SYS << CAUSEB_EXCCODE));

		/* Set PC to the exception entry point */
		arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180;

	} else {
		kvm_err("Trying to deliver SYSCALL when EXL is already set\n");
		er = EMULATE_FAIL;
	}

	return er;
}

enum emulation_result kvm_mips_emulate_tlbmiss_ld(u32 cause,
						  u32 *opc,
						  struct kvm_run *run,
						  struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
			(kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID);

	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
		/* save old pc */
		kvm_write_c0_guest_epc(cop0, arch->pc);
		kvm_set_c0_guest_status(cop0, ST0_EXL);

		if (cause & CAUSEF_BD)
			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
		else
			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

		kvm_debug("[EXL == 0] delivering TLB MISS @ pc %#lx\n",
			  arch->pc);

		/* set pc to the exception entry point */
		arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x0;

	} else {
		kvm_debug("[EXL == 1] delivering TLB MISS @ pc %#lx\n",
			  arch->pc);

		arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180;
	}

	kvm_change_c0_guest_cause(cop0, (0xff),
				  (EXCCODE_TLBL << CAUSEB_EXCCODE));

	/* setup badvaddr, context and entryhi registers for the guest */
	kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
	/* XXXKYMA: is the context register used by linux??? */
	kvm_write_c0_guest_entryhi(cop0, entryhi);

	return EMULATE_DONE;
}
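/*
 * The helpers below all follow the same delivery pattern as above: when EXL
 * is clear the faulting PC is saved to Guest.CP0_EPC (with Cause.BD mirroring
 * the host's delay slot state) and the guest resumes at the refill vector
 * (base + 0x0) or the general vector (base + 0x180), with Cause.ExcCode,
 * BadVAddr and EntryHi describing the fault.
 */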
*/ 2009 kvm_write_c0_guest_entryhi(cop0, entryhi); 2010 2011 return EMULATE_DONE; 2012 } 2013 2014 enum emulation_result kvm_mips_emulate_tlbinv_ld(u32 cause, 2015 u32 *opc, 2016 struct kvm_run *run, 2017 struct kvm_vcpu *vcpu) 2018 { 2019 struct mips_coproc *cop0 = vcpu->arch.cop0; 2020 struct kvm_vcpu_arch *arch = &vcpu->arch; 2021 unsigned long entryhi = 2022 (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) | 2023 (kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID); 2024 2025 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { 2026 /* save old pc */ 2027 kvm_write_c0_guest_epc(cop0, arch->pc); 2028 kvm_set_c0_guest_status(cop0, ST0_EXL); 2029 2030 if (cause & CAUSEF_BD) 2031 kvm_set_c0_guest_cause(cop0, CAUSEF_BD); 2032 else 2033 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); 2034 2035 kvm_debug("[EXL == 0] delivering TLB INV @ pc %#lx\n", 2036 arch->pc); 2037 } else { 2038 kvm_debug("[EXL == 1] delivering TLB MISS @ pc %#lx\n", 2039 arch->pc); 2040 } 2041 2042 /* set pc to the exception entry point */ 2043 arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180; 2044 2045 kvm_change_c0_guest_cause(cop0, (0xff), 2046 (EXCCODE_TLBL << CAUSEB_EXCCODE)); 2047 2048 /* setup badvaddr, context and entryhi registers for the guest */ 2049 kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr); 2050 /* XXXKYMA: is the context register used by linux??? */ 2051 kvm_write_c0_guest_entryhi(cop0, entryhi); 2052 2053 return EMULATE_DONE; 2054 } 2055 2056 enum emulation_result kvm_mips_emulate_tlbmiss_st(u32 cause, 2057 u32 *opc, 2058 struct kvm_run *run, 2059 struct kvm_vcpu *vcpu) 2060 { 2061 struct mips_coproc *cop0 = vcpu->arch.cop0; 2062 struct kvm_vcpu_arch *arch = &vcpu->arch; 2063 unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) | 2064 (kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID); 2065 2066 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { 2067 /* save old pc */ 2068 kvm_write_c0_guest_epc(cop0, arch->pc); 2069 kvm_set_c0_guest_status(cop0, ST0_EXL); 2070 2071 if (cause & CAUSEF_BD) 2072 kvm_set_c0_guest_cause(cop0, CAUSEF_BD); 2073 else 2074 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); 2075 2076 kvm_debug("[EXL == 0] Delivering TLB MISS @ pc %#lx\n", 2077 arch->pc); 2078 2079 /* Set PC to the exception entry point */ 2080 arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x0; 2081 } else { 2082 kvm_debug("[EXL == 1] Delivering TLB MISS @ pc %#lx\n", 2083 arch->pc); 2084 arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180; 2085 } 2086 2087 kvm_change_c0_guest_cause(cop0, (0xff), 2088 (EXCCODE_TLBS << CAUSEB_EXCCODE)); 2089 2090 /* setup badvaddr, context and entryhi registers for the guest */ 2091 kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr); 2092 /* XXXKYMA: is the context register used by linux??? 
*/ 2093 kvm_write_c0_guest_entryhi(cop0, entryhi); 2094 2095 return EMULATE_DONE; 2096 } 2097 2098 enum emulation_result kvm_mips_emulate_tlbinv_st(u32 cause, 2099 u32 *opc, 2100 struct kvm_run *run, 2101 struct kvm_vcpu *vcpu) 2102 { 2103 struct mips_coproc *cop0 = vcpu->arch.cop0; 2104 struct kvm_vcpu_arch *arch = &vcpu->arch; 2105 unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) | 2106 (kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID); 2107 2108 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { 2109 /* save old pc */ 2110 kvm_write_c0_guest_epc(cop0, arch->pc); 2111 kvm_set_c0_guest_status(cop0, ST0_EXL); 2112 2113 if (cause & CAUSEF_BD) 2114 kvm_set_c0_guest_cause(cop0, CAUSEF_BD); 2115 else 2116 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); 2117 2118 kvm_debug("[EXL == 0] Delivering TLB MISS @ pc %#lx\n", 2119 arch->pc); 2120 } else { 2121 kvm_debug("[EXL == 1] Delivering TLB MISS @ pc %#lx\n", 2122 arch->pc); 2123 } 2124 2125 /* Set PC to the exception entry point */ 2126 arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180; 2127 2128 kvm_change_c0_guest_cause(cop0, (0xff), 2129 (EXCCODE_TLBS << CAUSEB_EXCCODE)); 2130 2131 /* setup badvaddr, context and entryhi registers for the guest */ 2132 kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr); 2133 /* XXXKYMA: is the context register used by linux??? */ 2134 kvm_write_c0_guest_entryhi(cop0, entryhi); 2135 2136 return EMULATE_DONE; 2137 } 2138 2139 enum emulation_result kvm_mips_emulate_tlbmod(u32 cause, 2140 u32 *opc, 2141 struct kvm_run *run, 2142 struct kvm_vcpu *vcpu) 2143 { 2144 struct mips_coproc *cop0 = vcpu->arch.cop0; 2145 unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) | 2146 (kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID); 2147 struct kvm_vcpu_arch *arch = &vcpu->arch; 2148 2149 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { 2150 /* save old pc */ 2151 kvm_write_c0_guest_epc(cop0, arch->pc); 2152 kvm_set_c0_guest_status(cop0, ST0_EXL); 2153 2154 if (cause & CAUSEF_BD) 2155 kvm_set_c0_guest_cause(cop0, CAUSEF_BD); 2156 else 2157 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); 2158 2159 kvm_debug("[EXL == 0] Delivering TLB MOD @ pc %#lx\n", 2160 arch->pc); 2161 } else { 2162 kvm_debug("[EXL == 1] Delivering TLB MOD @ pc %#lx\n", 2163 arch->pc); 2164 } 2165 2166 arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180; 2167 2168 kvm_change_c0_guest_cause(cop0, (0xff), 2169 (EXCCODE_MOD << CAUSEB_EXCCODE)); 2170 2171 /* setup badvaddr, context and entryhi registers for the guest */ 2172 kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr); 2173 /* XXXKYMA: is the context register used by linux??? 
*/ 2174 kvm_write_c0_guest_entryhi(cop0, entryhi); 2175 2176 return EMULATE_DONE; 2177 } 2178 2179 enum emulation_result kvm_mips_emulate_fpu_exc(u32 cause, 2180 u32 *opc, 2181 struct kvm_run *run, 2182 struct kvm_vcpu *vcpu) 2183 { 2184 struct mips_coproc *cop0 = vcpu->arch.cop0; 2185 struct kvm_vcpu_arch *arch = &vcpu->arch; 2186 2187 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { 2188 /* save old pc */ 2189 kvm_write_c0_guest_epc(cop0, arch->pc); 2190 kvm_set_c0_guest_status(cop0, ST0_EXL); 2191 2192 if (cause & CAUSEF_BD) 2193 kvm_set_c0_guest_cause(cop0, CAUSEF_BD); 2194 else 2195 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); 2196 2197 } 2198 2199 arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180; 2200 2201 kvm_change_c0_guest_cause(cop0, (0xff), 2202 (EXCCODE_CPU << CAUSEB_EXCCODE)); 2203 kvm_change_c0_guest_cause(cop0, (CAUSEF_CE), (0x1 << CAUSEB_CE)); 2204 2205 return EMULATE_DONE; 2206 } 2207 2208 enum emulation_result kvm_mips_emulate_ri_exc(u32 cause, 2209 u32 *opc, 2210 struct kvm_run *run, 2211 struct kvm_vcpu *vcpu) 2212 { 2213 struct mips_coproc *cop0 = vcpu->arch.cop0; 2214 struct kvm_vcpu_arch *arch = &vcpu->arch; 2215 enum emulation_result er = EMULATE_DONE; 2216 2217 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { 2218 /* save old pc */ 2219 kvm_write_c0_guest_epc(cop0, arch->pc); 2220 kvm_set_c0_guest_status(cop0, ST0_EXL); 2221 2222 if (cause & CAUSEF_BD) 2223 kvm_set_c0_guest_cause(cop0, CAUSEF_BD); 2224 else 2225 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); 2226 2227 kvm_debug("Delivering RI @ pc %#lx\n", arch->pc); 2228 2229 kvm_change_c0_guest_cause(cop0, (0xff), 2230 (EXCCODE_RI << CAUSEB_EXCCODE)); 2231 2232 /* Set PC to the exception entry point */ 2233 arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180; 2234 2235 } else { 2236 kvm_err("Trying to deliver RI when EXL is already set\n"); 2237 er = EMULATE_FAIL; 2238 } 2239 2240 return er; 2241 } 2242 2243 enum emulation_result kvm_mips_emulate_bp_exc(u32 cause, 2244 u32 *opc, 2245 struct kvm_run *run, 2246 struct kvm_vcpu *vcpu) 2247 { 2248 struct mips_coproc *cop0 = vcpu->arch.cop0; 2249 struct kvm_vcpu_arch *arch = &vcpu->arch; 2250 enum emulation_result er = EMULATE_DONE; 2251 2252 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { 2253 /* save old pc */ 2254 kvm_write_c0_guest_epc(cop0, arch->pc); 2255 kvm_set_c0_guest_status(cop0, ST0_EXL); 2256 2257 if (cause & CAUSEF_BD) 2258 kvm_set_c0_guest_cause(cop0, CAUSEF_BD); 2259 else 2260 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); 2261 2262 kvm_debug("Delivering BP @ pc %#lx\n", arch->pc); 2263 2264 kvm_change_c0_guest_cause(cop0, (0xff), 2265 (EXCCODE_BP << CAUSEB_EXCCODE)); 2266 2267 /* Set PC to the exception entry point */ 2268 arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180; 2269 2270 } else { 2271 kvm_err("Trying to deliver BP when EXL is already set\n"); 2272 er = EMULATE_FAIL; 2273 } 2274 2275 return er; 2276 } 2277 2278 enum emulation_result kvm_mips_emulate_trap_exc(u32 cause, 2279 u32 *opc, 2280 struct kvm_run *run, 2281 struct kvm_vcpu *vcpu) 2282 { 2283 struct mips_coproc *cop0 = vcpu->arch.cop0; 2284 struct kvm_vcpu_arch *arch = &vcpu->arch; 2285 enum emulation_result er = EMULATE_DONE; 2286 2287 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { 2288 /* save old pc */ 2289 kvm_write_c0_guest_epc(cop0, arch->pc); 2290 kvm_set_c0_guest_status(cop0, ST0_EXL); 2291 2292 if (cause & CAUSEF_BD) 2293 kvm_set_c0_guest_cause(cop0, CAUSEF_BD); 2294 else 2295 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); 2296 2297 
kvm_debug("Delivering TRAP @ pc %#lx\n", arch->pc); 2298 2299 kvm_change_c0_guest_cause(cop0, (0xff), 2300 (EXCCODE_TR << CAUSEB_EXCCODE)); 2301 2302 /* Set PC to the exception entry point */ 2303 arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180; 2304 2305 } else { 2306 kvm_err("Trying to deliver TRAP when EXL is already set\n"); 2307 er = EMULATE_FAIL; 2308 } 2309 2310 return er; 2311 } 2312 2313 enum emulation_result kvm_mips_emulate_msafpe_exc(u32 cause, 2314 u32 *opc, 2315 struct kvm_run *run, 2316 struct kvm_vcpu *vcpu) 2317 { 2318 struct mips_coproc *cop0 = vcpu->arch.cop0; 2319 struct kvm_vcpu_arch *arch = &vcpu->arch; 2320 enum emulation_result er = EMULATE_DONE; 2321 2322 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { 2323 /* save old pc */ 2324 kvm_write_c0_guest_epc(cop0, arch->pc); 2325 kvm_set_c0_guest_status(cop0, ST0_EXL); 2326 2327 if (cause & CAUSEF_BD) 2328 kvm_set_c0_guest_cause(cop0, CAUSEF_BD); 2329 else 2330 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); 2331 2332 kvm_debug("Delivering MSAFPE @ pc %#lx\n", arch->pc); 2333 2334 kvm_change_c0_guest_cause(cop0, (0xff), 2335 (EXCCODE_MSAFPE << CAUSEB_EXCCODE)); 2336 2337 /* Set PC to the exception entry point */ 2338 arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180; 2339 2340 } else { 2341 kvm_err("Trying to deliver MSAFPE when EXL is already set\n"); 2342 er = EMULATE_FAIL; 2343 } 2344 2345 return er; 2346 } 2347 2348 enum emulation_result kvm_mips_emulate_fpe_exc(u32 cause, 2349 u32 *opc, 2350 struct kvm_run *run, 2351 struct kvm_vcpu *vcpu) 2352 { 2353 struct mips_coproc *cop0 = vcpu->arch.cop0; 2354 struct kvm_vcpu_arch *arch = &vcpu->arch; 2355 enum emulation_result er = EMULATE_DONE; 2356 2357 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { 2358 /* save old pc */ 2359 kvm_write_c0_guest_epc(cop0, arch->pc); 2360 kvm_set_c0_guest_status(cop0, ST0_EXL); 2361 2362 if (cause & CAUSEF_BD) 2363 kvm_set_c0_guest_cause(cop0, CAUSEF_BD); 2364 else 2365 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); 2366 2367 kvm_debug("Delivering FPE @ pc %#lx\n", arch->pc); 2368 2369 kvm_change_c0_guest_cause(cop0, (0xff), 2370 (EXCCODE_FPE << CAUSEB_EXCCODE)); 2371 2372 /* Set PC to the exception entry point */ 2373 arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180; 2374 2375 } else { 2376 kvm_err("Trying to deliver FPE when EXL is already set\n"); 2377 er = EMULATE_FAIL; 2378 } 2379 2380 return er; 2381 } 2382 2383 enum emulation_result kvm_mips_emulate_msadis_exc(u32 cause, 2384 u32 *opc, 2385 struct kvm_run *run, 2386 struct kvm_vcpu *vcpu) 2387 { 2388 struct mips_coproc *cop0 = vcpu->arch.cop0; 2389 struct kvm_vcpu_arch *arch = &vcpu->arch; 2390 enum emulation_result er = EMULATE_DONE; 2391 2392 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { 2393 /* save old pc */ 2394 kvm_write_c0_guest_epc(cop0, arch->pc); 2395 kvm_set_c0_guest_status(cop0, ST0_EXL); 2396 2397 if (cause & CAUSEF_BD) 2398 kvm_set_c0_guest_cause(cop0, CAUSEF_BD); 2399 else 2400 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); 2401 2402 kvm_debug("Delivering MSADIS @ pc %#lx\n", arch->pc); 2403 2404 kvm_change_c0_guest_cause(cop0, (0xff), 2405 (EXCCODE_MSADIS << CAUSEB_EXCCODE)); 2406 2407 /* Set PC to the exception entry point */ 2408 arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180; 2409 2410 } else { 2411 kvm_err("Trying to deliver MSADIS when EXL is already set\n"); 2412 er = EMULATE_FAIL; 2413 } 2414 2415 return er; 2416 } 2417 2418 enum emulation_result kvm_mips_handle_ri(u32 cause, u32 *opc, 2419 struct kvm_run *run, 2420 struct 
kvm_vcpu *vcpu) 2421 { 2422 struct mips_coproc *cop0 = vcpu->arch.cop0; 2423 struct kvm_vcpu_arch *arch = &vcpu->arch; 2424 enum emulation_result er = EMULATE_DONE; 2425 unsigned long curr_pc; 2426 union mips_instruction inst; 2427 int err; 2428 2429 /* 2430 * Update PC and hold onto current PC in case there is 2431 * an error and we want to rollback the PC 2432 */ 2433 curr_pc = vcpu->arch.pc; 2434 er = update_pc(vcpu, cause); 2435 if (er == EMULATE_FAIL) 2436 return er; 2437 2438 /* Fetch the instruction. */ 2439 if (cause & CAUSEF_BD) 2440 opc += 1; 2441 err = kvm_get_badinstr(opc, vcpu, &inst.word); 2442 if (err) { 2443 kvm_err("%s: Cannot get inst @ %p (%d)\n", __func__, opc, err); 2444 return EMULATE_FAIL; 2445 } 2446 2447 if (inst.r_format.opcode == spec3_op && 2448 inst.r_format.func == rdhwr_op && 2449 inst.r_format.rs == 0 && 2450 (inst.r_format.re >> 3) == 0) { 2451 int usermode = !KVM_GUEST_KERNEL_MODE(vcpu); 2452 int rd = inst.r_format.rd; 2453 int rt = inst.r_format.rt; 2454 int sel = inst.r_format.re & 0x7; 2455 2456 /* If usermode, check RDHWR rd is allowed by guest HWREna */ 2457 if (usermode && !(kvm_read_c0_guest_hwrena(cop0) & BIT(rd))) { 2458 kvm_debug("RDHWR %#x disallowed by HWREna @ %p\n", 2459 rd, opc); 2460 goto emulate_ri; 2461 } 2462 switch (rd) { 2463 case MIPS_HWR_CPUNUM: /* CPU number */ 2464 arch->gprs[rt] = vcpu->vcpu_id; 2465 break; 2466 case MIPS_HWR_SYNCISTEP: /* SYNCI length */ 2467 arch->gprs[rt] = min(current_cpu_data.dcache.linesz, 2468 current_cpu_data.icache.linesz); 2469 break; 2470 case MIPS_HWR_CC: /* Read count register */ 2471 arch->gprs[rt] = (s32)kvm_mips_read_count(vcpu); 2472 break; 2473 case MIPS_HWR_CCRES: /* Count register resolution */ 2474 switch (current_cpu_data.cputype) { 2475 case CPU_20KC: 2476 case CPU_25KF: 2477 arch->gprs[rt] = 1; 2478 break; 2479 default: 2480 arch->gprs[rt] = 2; 2481 } 2482 break; 2483 case MIPS_HWR_ULR: /* Read UserLocal register */ 2484 arch->gprs[rt] = kvm_read_c0_guest_userlocal(cop0); 2485 break; 2486 2487 default: 2488 kvm_debug("RDHWR %#x not supported @ %p\n", rd, opc); 2489 goto emulate_ri; 2490 } 2491 2492 trace_kvm_hwr(vcpu, KVM_TRACE_RDHWR, KVM_TRACE_HWR(rd, sel), 2493 vcpu->arch.gprs[rt]); 2494 } else { 2495 kvm_debug("Emulate RI not supported @ %p: %#x\n", 2496 opc, inst.word); 2497 goto emulate_ri; 2498 } 2499 2500 return EMULATE_DONE; 2501 2502 emulate_ri: 2503 /* 2504 * Rollback PC (if in branch delay slot then the PC already points to 2505 * branch target), and pass the RI exception to the guest OS. 
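 *
 * kvm_mips_emulate_ri_exc() saves arch->pc into the guest CP0_EPC, so the
 * rollback below makes the guest see EPC pointing at the offending
 * instruction (or at the branch, if it sat in a delay slot) rather than at
 * the resume address computed by update_pc() above.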
2506 */ 2507 vcpu->arch.pc = curr_pc; 2508 return kvm_mips_emulate_ri_exc(cause, opc, run, vcpu); 2509 } 2510 2511 enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu, 2512 struct kvm_run *run) 2513 { 2514 unsigned long *gpr = &vcpu->arch.gprs[vcpu->arch.io_gpr]; 2515 enum emulation_result er = EMULATE_DONE; 2516 2517 if (run->mmio.len > sizeof(*gpr)) { 2518 kvm_err("Bad MMIO length: %d", run->mmio.len); 2519 er = EMULATE_FAIL; 2520 goto done; 2521 } 2522 2523 /* Restore saved resume PC */ 2524 vcpu->arch.pc = vcpu->arch.io_pc; 2525 2526 switch (run->mmio.len) { 2527 case 4: 2528 *gpr = *(s32 *) run->mmio.data; 2529 break; 2530 2531 case 2: 2532 if (vcpu->mmio_needed == 2) 2533 *gpr = *(s16 *) run->mmio.data; 2534 else 2535 *gpr = *(u16 *)run->mmio.data; 2536 2537 break; 2538 case 1: 2539 if (vcpu->mmio_needed == 2) 2540 *gpr = *(s8 *) run->mmio.data; 2541 else 2542 *gpr = *(u8 *) run->mmio.data; 2543 break; 2544 } 2545 2546 done: 2547 return er; 2548 } 2549 2550 static enum emulation_result kvm_mips_emulate_exc(u32 cause, 2551 u32 *opc, 2552 struct kvm_run *run, 2553 struct kvm_vcpu *vcpu) 2554 { 2555 u32 exccode = (cause >> CAUSEB_EXCCODE) & 0x1f; 2556 struct mips_coproc *cop0 = vcpu->arch.cop0; 2557 struct kvm_vcpu_arch *arch = &vcpu->arch; 2558 enum emulation_result er = EMULATE_DONE; 2559 2560 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { 2561 /* save old pc */ 2562 kvm_write_c0_guest_epc(cop0, arch->pc); 2563 kvm_set_c0_guest_status(cop0, ST0_EXL); 2564 2565 if (cause & CAUSEF_BD) 2566 kvm_set_c0_guest_cause(cop0, CAUSEF_BD); 2567 else 2568 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); 2569 2570 kvm_change_c0_guest_cause(cop0, (0xff), 2571 (exccode << CAUSEB_EXCCODE)); 2572 2573 /* Set PC to the exception entry point */ 2574 arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180; 2575 kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr); 2576 2577 kvm_debug("Delivering EXC %d @ pc %#lx, badVaddr: %#lx\n", 2578 exccode, kvm_read_c0_guest_epc(cop0), 2579 kvm_read_c0_guest_badvaddr(cop0)); 2580 } else { 2581 kvm_err("Trying to deliver EXC when EXL is already set\n"); 2582 er = EMULATE_FAIL; 2583 } 2584 2585 return er; 2586 } 2587 2588 enum emulation_result kvm_mips_check_privilege(u32 cause, 2589 u32 *opc, 2590 struct kvm_run *run, 2591 struct kvm_vcpu *vcpu) 2592 { 2593 enum emulation_result er = EMULATE_DONE; 2594 u32 exccode = (cause >> CAUSEB_EXCCODE) & 0x1f; 2595 unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr; 2596 2597 int usermode = !KVM_GUEST_KERNEL_MODE(vcpu); 2598 2599 if (usermode) { 2600 switch (exccode) { 2601 case EXCCODE_INT: 2602 case EXCCODE_SYS: 2603 case EXCCODE_BP: 2604 case EXCCODE_RI: 2605 case EXCCODE_TR: 2606 case EXCCODE_MSAFPE: 2607 case EXCCODE_FPE: 2608 case EXCCODE_MSADIS: 2609 break; 2610 2611 case EXCCODE_CPU: 2612 if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 0) 2613 er = EMULATE_PRIV_FAIL; 2614 break; 2615 2616 case EXCCODE_MOD: 2617 break; 2618 2619 case EXCCODE_TLBL: 2620 /* 2621 * We we are accessing Guest kernel space, then send an 2622 * address error exception to the guest 2623 */ 2624 if (badvaddr >= (unsigned long) KVM_GUEST_KSEG0) { 2625 kvm_debug("%s: LD MISS @ %#lx\n", __func__, 2626 badvaddr); 2627 cause &= ~0xff; 2628 cause |= (EXCCODE_ADEL << CAUSEB_EXCCODE); 2629 er = EMULATE_PRIV_FAIL; 2630 } 2631 break; 2632 2633 case EXCCODE_TLBS: 2634 /* 2635 * We we are accessing Guest kernel space, then send an 2636 * address error exception to the guest 2637 */ 2638 if (badvaddr >= (unsigned long) 
KVM_GUEST_KSEG0) { 2639 kvm_debug("%s: ST MISS @ %#lx\n", __func__, 2640 badvaddr); 2641 cause &= ~0xff; 2642 cause |= (EXCCODE_ADES << CAUSEB_EXCCODE); 2643 er = EMULATE_PRIV_FAIL; 2644 } 2645 break; 2646 2647 case EXCCODE_ADES: 2648 kvm_debug("%s: address error ST @ %#lx\n", __func__, 2649 badvaddr); 2650 if ((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) { 2651 cause &= ~0xff; 2652 cause |= (EXCCODE_TLBS << CAUSEB_EXCCODE); 2653 } 2654 er = EMULATE_PRIV_FAIL; 2655 break; 2656 case EXCCODE_ADEL: 2657 kvm_debug("%s: address error LD @ %#lx\n", __func__, 2658 badvaddr); 2659 if ((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) { 2660 cause &= ~0xff; 2661 cause |= (EXCCODE_TLBL << CAUSEB_EXCCODE); 2662 } 2663 er = EMULATE_PRIV_FAIL; 2664 break; 2665 default: 2666 er = EMULATE_PRIV_FAIL; 2667 break; 2668 } 2669 } 2670 2671 if (er == EMULATE_PRIV_FAIL) 2672 kvm_mips_emulate_exc(cause, opc, run, vcpu); 2673 2674 return er; 2675 } 2676 2677 /* 2678 * User Address (UA) fault, this could happen if 2679 * (1) TLB entry not present/valid in both Guest and shadow host TLBs, in this 2680 * case we pass on the fault to the guest kernel and let it handle it. 2681 * (2) TLB entry is present in the Guest TLB but not in the shadow, in this 2682 * case we inject the TLB from the Guest TLB into the shadow host TLB 2683 */ 2684 enum emulation_result kvm_mips_handle_tlbmiss(u32 cause, 2685 u32 *opc, 2686 struct kvm_run *run, 2687 struct kvm_vcpu *vcpu, 2688 bool write_fault) 2689 { 2690 enum emulation_result er = EMULATE_DONE; 2691 u32 exccode = (cause >> CAUSEB_EXCCODE) & 0x1f; 2692 unsigned long va = vcpu->arch.host_cp0_badvaddr; 2693 int index; 2694 2695 kvm_debug("kvm_mips_handle_tlbmiss: badvaddr: %#lx\n", 2696 vcpu->arch.host_cp0_badvaddr); 2697 2698 /* 2699 * KVM would not have got the exception if this entry was valid in the 2700 * shadow host TLB. Check the Guest TLB, if the entry is not there then 2701 * send the guest an exception. The guest exc handler should then inject 2702 * an entry into the guest TLB. 
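 *
 * Three outcomes are handled below: no matching guest TLB entry (deliver a
 * guest TLB refill exception via kvm_mips_emulate_tlbmiss_ld/st), an entry
 * that matches but is not valid (deliver a TLB invalid exception via
 * kvm_mips_emulate_tlbinv_ld/st), or a valid entry, which is injected into
 * the shadow host TLB with kvm_mips_handle_mapped_seg_tlb_fault().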
2703 */ 2704 index = kvm_mips_guest_tlb_lookup(vcpu, 2705 (va & VPN2_MASK) | 2706 (kvm_read_c0_guest_entryhi(vcpu->arch.cop0) & 2707 KVM_ENTRYHI_ASID)); 2708 if (index < 0) { 2709 if (exccode == EXCCODE_TLBL) { 2710 er = kvm_mips_emulate_tlbmiss_ld(cause, opc, run, vcpu); 2711 } else if (exccode == EXCCODE_TLBS) { 2712 er = kvm_mips_emulate_tlbmiss_st(cause, opc, run, vcpu); 2713 } else { 2714 kvm_err("%s: invalid exc code: %d\n", __func__, 2715 exccode); 2716 er = EMULATE_FAIL; 2717 } 2718 } else { 2719 struct kvm_mips_tlb *tlb = &vcpu->arch.guest_tlb[index]; 2720 2721 /* 2722 * Check if the entry is valid, if not then setup a TLB invalid 2723 * exception to the guest 2724 */ 2725 if (!TLB_IS_VALID(*tlb, va)) { 2726 if (exccode == EXCCODE_TLBL) { 2727 er = kvm_mips_emulate_tlbinv_ld(cause, opc, run, 2728 vcpu); 2729 } else if (exccode == EXCCODE_TLBS) { 2730 er = kvm_mips_emulate_tlbinv_st(cause, opc, run, 2731 vcpu); 2732 } else { 2733 kvm_err("%s: invalid exc code: %d\n", __func__, 2734 exccode); 2735 er = EMULATE_FAIL; 2736 } 2737 } else { 2738 kvm_debug("Injecting hi: %#lx, lo0: %#lx, lo1: %#lx into shadow host TLB\n", 2739 tlb->tlb_hi, tlb->tlb_lo[0], tlb->tlb_lo[1]); 2740 /* 2741 * OK we have a Guest TLB entry, now inject it into the 2742 * shadow host TLB 2743 */ 2744 if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, va, 2745 write_fault)) { 2746 kvm_err("%s: handling mapped seg tlb fault for %lx, index: %u, vcpu: %p, ASID: %#lx\n", 2747 __func__, va, index, vcpu, 2748 read_c0_entryhi()); 2749 er = EMULATE_FAIL; 2750 } 2751 } 2752 } 2753 2754 return er; 2755 } 2756