1 /* 2 * This file is subject to the terms and conditions of the GNU General Public 3 * License. See the file "COPYING" in the main directory of this archive 4 * for more details. 5 * 6 * KVM/MIPS: Instruction/Exception emulation 7 * 8 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. 9 * Authors: Sanjay Lal <sanjayl@kymasys.com> 10 */ 11 12 #include <linux/errno.h> 13 #include <linux/err.h> 14 #include <linux/ktime.h> 15 #include <linux/kvm_host.h> 16 #include <linux/module.h> 17 #include <linux/vmalloc.h> 18 #include <linux/fs.h> 19 #include <linux/bootmem.h> 20 #include <linux/random.h> 21 #include <asm/page.h> 22 #include <asm/cacheflush.h> 23 #include <asm/cpu-info.h> 24 #include <asm/mmu_context.h> 25 #include <asm/tlbflush.h> 26 #include <asm/inst.h> 27 28 #undef CONFIG_MIPS_MT 29 #include <asm/r4kcache.h> 30 #define CONFIG_MIPS_MT 31 32 #include "opcode.h" 33 #include "interrupt.h" 34 #include "commpage.h" 35 36 #include "trace.h" 37 38 /* 39 * Compute the return address and do emulate branch simulation, if required. 40 * This function should be called only in branch delay slot active. 41 */ 42 unsigned long kvm_compute_return_epc(struct kvm_vcpu *vcpu, 43 unsigned long instpc) 44 { 45 unsigned int dspcontrol; 46 union mips_instruction insn; 47 struct kvm_vcpu_arch *arch = &vcpu->arch; 48 long epc = instpc; 49 long nextpc = KVM_INVALID_INST; 50 51 if (epc & 3) 52 goto unaligned; 53 54 /* Read the instruction */ 55 insn.word = kvm_get_inst((uint32_t *) epc, vcpu); 56 57 if (insn.word == KVM_INVALID_INST) 58 return KVM_INVALID_INST; 59 60 switch (insn.i_format.opcode) { 61 /* jr and jalr are in r_format format. */ 62 case spec_op: 63 switch (insn.r_format.func) { 64 case jalr_op: 65 arch->gprs[insn.r_format.rd] = epc + 8; 66 /* Fall through */ 67 case jr_op: 68 nextpc = arch->gprs[insn.r_format.rs]; 69 break; 70 } 71 break; 72 73 /* 74 * This group contains: 75 * bltz_op, bgez_op, bltzl_op, bgezl_op, 76 * bltzal_op, bgezal_op, bltzall_op, bgezall_op. 77 */ 78 case bcond_op: 79 switch (insn.i_format.rt) { 80 case bltz_op: 81 case bltzl_op: 82 if ((long)arch->gprs[insn.i_format.rs] < 0) 83 epc = epc + 4 + (insn.i_format.simmediate << 2); 84 else 85 epc += 8; 86 nextpc = epc; 87 break; 88 89 case bgez_op: 90 case bgezl_op: 91 if ((long)arch->gprs[insn.i_format.rs] >= 0) 92 epc = epc + 4 + (insn.i_format.simmediate << 2); 93 else 94 epc += 8; 95 nextpc = epc; 96 break; 97 98 case bltzal_op: 99 case bltzall_op: 100 arch->gprs[31] = epc + 8; 101 if ((long)arch->gprs[insn.i_format.rs] < 0) 102 epc = epc + 4 + (insn.i_format.simmediate << 2); 103 else 104 epc += 8; 105 nextpc = epc; 106 break; 107 108 case bgezal_op: 109 case bgezall_op: 110 arch->gprs[31] = epc + 8; 111 if ((long)arch->gprs[insn.i_format.rs] >= 0) 112 epc = epc + 4 + (insn.i_format.simmediate << 2); 113 else 114 epc += 8; 115 nextpc = epc; 116 break; 117 case bposge32_op: 118 if (!cpu_has_dsp) 119 goto sigill; 120 121 dspcontrol = rddsp(0x01); 122 123 if (dspcontrol >= 32) 124 epc = epc + 4 + (insn.i_format.simmediate << 2); 125 else 126 epc += 8; 127 nextpc = epc; 128 break; 129 } 130 break; 131 132 /* These are unconditional and in j_format. */ 133 case jal_op: 134 arch->gprs[31] = instpc + 8; 135 case j_op: 136 epc += 4; 137 epc >>= 28; 138 epc <<= 28; 139 epc |= (insn.j_format.target << 2); 140 nextpc = epc; 141 break; 142 143 /* These are conditional and in i_format. */ 144 case beq_op: 145 case beql_op: 146 if (arch->gprs[insn.i_format.rs] == 147 arch->gprs[insn.i_format.rt]) 148 epc = epc + 4 + (insn.i_format.simmediate << 2); 149 else 150 epc += 8; 151 nextpc = epc; 152 break; 153 154 case bne_op: 155 case bnel_op: 156 if (arch->gprs[insn.i_format.rs] != 157 arch->gprs[insn.i_format.rt]) 158 epc = epc + 4 + (insn.i_format.simmediate << 2); 159 else 160 epc += 8; 161 nextpc = epc; 162 break; 163 164 case blez_op: /* not really i_format */ 165 case blezl_op: 166 /* rt field assumed to be zero */ 167 if ((long)arch->gprs[insn.i_format.rs] <= 0) 168 epc = epc + 4 + (insn.i_format.simmediate << 2); 169 else 170 epc += 8; 171 nextpc = epc; 172 break; 173 174 case bgtz_op: 175 case bgtzl_op: 176 /* rt field assumed to be zero */ 177 if ((long)arch->gprs[insn.i_format.rs] > 0) 178 epc = epc + 4 + (insn.i_format.simmediate << 2); 179 else 180 epc += 8; 181 nextpc = epc; 182 break; 183 184 /* And now the FPA/cp1 branch instructions. */ 185 case cop1_op: 186 kvm_err("%s: unsupported cop1_op\n", __func__); 187 break; 188 } 189 190 return nextpc; 191 192 unaligned: 193 kvm_err("%s: unaligned epc\n", __func__); 194 return nextpc; 195 196 sigill: 197 kvm_err("%s: DSP branch but not DSP ASE\n", __func__); 198 return nextpc; 199 } 200 201 enum emulation_result update_pc(struct kvm_vcpu *vcpu, uint32_t cause) 202 { 203 unsigned long branch_pc; 204 enum emulation_result er = EMULATE_DONE; 205 206 if (cause & CAUSEF_BD) { 207 branch_pc = kvm_compute_return_epc(vcpu, vcpu->arch.pc); 208 if (branch_pc == KVM_INVALID_INST) { 209 er = EMULATE_FAIL; 210 } else { 211 vcpu->arch.pc = branch_pc; 212 kvm_debug("BD update_pc(): New PC: %#lx\n", 213 vcpu->arch.pc); 214 } 215 } else 216 vcpu->arch.pc += 4; 217 218 kvm_debug("update_pc(): New PC: %#lx\n", vcpu->arch.pc); 219 220 return er; 221 } 222 223 /** 224 * kvm_mips_count_disabled() - Find whether the CP0_Count timer is disabled. 225 * @vcpu: Virtual CPU. 226 * 227 * Returns: 1 if the CP0_Count timer is disabled by either the guest 228 * CP0_Cause.DC bit or the count_ctl.DC bit. 229 * 0 otherwise (in which case CP0_Count timer is running). 230 */ 231 static inline int kvm_mips_count_disabled(struct kvm_vcpu *vcpu) 232 { 233 struct mips_coproc *cop0 = vcpu->arch.cop0; 234 235 return (vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC) || 236 (kvm_read_c0_guest_cause(cop0) & CAUSEF_DC); 237 } 238 239 /** 240 * kvm_mips_ktime_to_count() - Scale ktime_t to a 32-bit count. 241 * 242 * Caches the dynamic nanosecond bias in vcpu->arch.count_dyn_bias. 243 * 244 * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running). 245 */ 246 static uint32_t kvm_mips_ktime_to_count(struct kvm_vcpu *vcpu, ktime_t now) 247 { 248 s64 now_ns, periods; 249 u64 delta; 250 251 now_ns = ktime_to_ns(now); 252 delta = now_ns + vcpu->arch.count_dyn_bias; 253 254 if (delta >= vcpu->arch.count_period) { 255 /* If delta is out of safe range the bias needs adjusting */ 256 periods = div64_s64(now_ns, vcpu->arch.count_period); 257 vcpu->arch.count_dyn_bias = -periods * vcpu->arch.count_period; 258 /* Recalculate delta with new bias */ 259 delta = now_ns + vcpu->arch.count_dyn_bias; 260 } 261 262 /* 263 * We've ensured that: 264 * delta < count_period 265 * 266 * Therefore the intermediate delta*count_hz will never overflow since 267 * at the boundary condition: 268 * delta = count_period 269 * delta = NSEC_PER_SEC * 2^32 / count_hz 270 * delta * count_hz = NSEC_PER_SEC * 2^32 271 */ 272 return div_u64(delta * vcpu->arch.count_hz, NSEC_PER_SEC); 273 } 274 275 /** 276 * kvm_mips_count_time() - Get effective current time. 277 * @vcpu: Virtual CPU. 278 * 279 * Get effective monotonic ktime. This is usually a straightforward ktime_get(), 280 * except when the master disable bit is set in count_ctl, in which case it is 281 * count_resume, i.e. the time that the count was disabled. 282 * 283 * Returns: Effective monotonic ktime for CP0_Count. 284 */ 285 static inline ktime_t kvm_mips_count_time(struct kvm_vcpu *vcpu) 286 { 287 if (unlikely(vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC)) 288 return vcpu->arch.count_resume; 289 290 return ktime_get(); 291 } 292 293 /** 294 * kvm_mips_read_count_running() - Read the current count value as if running. 295 * @vcpu: Virtual CPU. 296 * @now: Kernel time to read CP0_Count at. 297 * 298 * Returns the current guest CP0_Count register at time @now and handles if the 299 * timer interrupt is pending and hasn't been handled yet. 300 * 301 * Returns: The current value of the guest CP0_Count register. 302 */ 303 static uint32_t kvm_mips_read_count_running(struct kvm_vcpu *vcpu, ktime_t now) 304 { 305 ktime_t expires; 306 int running; 307 308 /* Is the hrtimer pending? */ 309 expires = hrtimer_get_expires(&vcpu->arch.comparecount_timer); 310 if (ktime_compare(now, expires) >= 0) { 311 /* 312 * Cancel it while we handle it so there's no chance of 313 * interference with the timeout handler. 314 */ 315 running = hrtimer_cancel(&vcpu->arch.comparecount_timer); 316 317 /* Nothing should be waiting on the timeout */ 318 kvm_mips_callbacks->queue_timer_int(vcpu); 319 320 /* 321 * Restart the timer if it was running based on the expiry time 322 * we read, so that we don't push it back 2 periods. 323 */ 324 if (running) { 325 expires = ktime_add_ns(expires, 326 vcpu->arch.count_period); 327 hrtimer_start(&vcpu->arch.comparecount_timer, expires, 328 HRTIMER_MODE_ABS); 329 } 330 } 331 332 /* Return the biased and scaled guest CP0_Count */ 333 return vcpu->arch.count_bias + kvm_mips_ktime_to_count(vcpu, now); 334 } 335 336 /** 337 * kvm_mips_read_count() - Read the current count value. 338 * @vcpu: Virtual CPU. 339 * 340 * Read the current guest CP0_Count value, taking into account whether the timer 341 * is stopped. 342 * 343 * Returns: The current guest CP0_Count value. 344 */ 345 uint32_t kvm_mips_read_count(struct kvm_vcpu *vcpu) 346 { 347 struct mips_coproc *cop0 = vcpu->arch.cop0; 348 349 /* If count disabled just read static copy of count */ 350 if (kvm_mips_count_disabled(vcpu)) 351 return kvm_read_c0_guest_count(cop0); 352 353 return kvm_mips_read_count_running(vcpu, ktime_get()); 354 } 355 356 /** 357 * kvm_mips_freeze_hrtimer() - Safely stop the hrtimer. 358 * @vcpu: Virtual CPU. 359 * @count: Output pointer for CP0_Count value at point of freeze. 360 * 361 * Freeze the hrtimer safely and return both the ktime and the CP0_Count value 362 * at the point it was frozen. It is guaranteed that any pending interrupts at 363 * the point it was frozen are handled, and none after that point. 364 * 365 * This is useful where the time/CP0_Count is needed in the calculation of the 366 * new parameters. 367 * 368 * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running). 369 * 370 * Returns: The ktime at the point of freeze. 371 */ 372 static ktime_t kvm_mips_freeze_hrtimer(struct kvm_vcpu *vcpu, 373 uint32_t *count) 374 { 375 ktime_t now; 376 377 /* stop hrtimer before finding time */ 378 hrtimer_cancel(&vcpu->arch.comparecount_timer); 379 now = ktime_get(); 380 381 /* find count at this point and handle pending hrtimer */ 382 *count = kvm_mips_read_count_running(vcpu, now); 383 384 return now; 385 } 386 387 /** 388 * kvm_mips_resume_hrtimer() - Resume hrtimer, updating expiry. 389 * @vcpu: Virtual CPU. 390 * @now: ktime at point of resume. 391 * @count: CP0_Count at point of resume. 392 * 393 * Resumes the timer and updates the timer expiry based on @now and @count. 394 * This can be used in conjunction with kvm_mips_freeze_timer() when timer 395 * parameters need to be changed. 396 * 397 * It is guaranteed that a timer interrupt immediately after resume will be 398 * handled, but not if CP_Compare is exactly at @count. That case is already 399 * handled by kvm_mips_freeze_timer(). 400 * 401 * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running). 402 */ 403 static void kvm_mips_resume_hrtimer(struct kvm_vcpu *vcpu, 404 ktime_t now, uint32_t count) 405 { 406 struct mips_coproc *cop0 = vcpu->arch.cop0; 407 uint32_t compare; 408 u64 delta; 409 ktime_t expire; 410 411 /* Calculate timeout (wrap 0 to 2^32) */ 412 compare = kvm_read_c0_guest_compare(cop0); 413 delta = (u64)(uint32_t)(compare - count - 1) + 1; 414 delta = div_u64(delta * NSEC_PER_SEC, vcpu->arch.count_hz); 415 expire = ktime_add_ns(now, delta); 416 417 /* Update hrtimer to use new timeout */ 418 hrtimer_cancel(&vcpu->arch.comparecount_timer); 419 hrtimer_start(&vcpu->arch.comparecount_timer, expire, HRTIMER_MODE_ABS); 420 } 421 422 /** 423 * kvm_mips_update_hrtimer() - Update next expiry time of hrtimer. 424 * @vcpu: Virtual CPU. 425 * 426 * Recalculates and updates the expiry time of the hrtimer. This can be used 427 * after timer parameters have been altered which do not depend on the time that 428 * the change occurs (in those cases kvm_mips_freeze_hrtimer() and 429 * kvm_mips_resume_hrtimer() are used directly). 430 * 431 * It is guaranteed that no timer interrupts will be lost in the process. 432 * 433 * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running). 434 */ 435 static void kvm_mips_update_hrtimer(struct kvm_vcpu *vcpu) 436 { 437 ktime_t now; 438 uint32_t count; 439 440 /* 441 * freeze_hrtimer takes care of a timer interrupts <= count, and 442 * resume_hrtimer the hrtimer takes care of a timer interrupts > count. 443 */ 444 now = kvm_mips_freeze_hrtimer(vcpu, &count); 445 kvm_mips_resume_hrtimer(vcpu, now, count); 446 } 447 448 /** 449 * kvm_mips_write_count() - Modify the count and update timer. 450 * @vcpu: Virtual CPU. 451 * @count: Guest CP0_Count value to set. 452 * 453 * Sets the CP0_Count value and updates the timer accordingly. 454 */ 455 void kvm_mips_write_count(struct kvm_vcpu *vcpu, uint32_t count) 456 { 457 struct mips_coproc *cop0 = vcpu->arch.cop0; 458 ktime_t now; 459 460 /* Calculate bias */ 461 now = kvm_mips_count_time(vcpu); 462 vcpu->arch.count_bias = count - kvm_mips_ktime_to_count(vcpu, now); 463 464 if (kvm_mips_count_disabled(vcpu)) 465 /* The timer's disabled, adjust the static count */ 466 kvm_write_c0_guest_count(cop0, count); 467 else 468 /* Update timeout */ 469 kvm_mips_resume_hrtimer(vcpu, now, count); 470 } 471 472 /** 473 * kvm_mips_init_count() - Initialise timer. 474 * @vcpu: Virtual CPU. 475 * 476 * Initialise the timer to a sensible frequency, namely 100MHz, zero it, and set 477 * it going if it's enabled. 478 */ 479 void kvm_mips_init_count(struct kvm_vcpu *vcpu) 480 { 481 /* 100 MHz */ 482 vcpu->arch.count_hz = 100*1000*1000; 483 vcpu->arch.count_period = div_u64((u64)NSEC_PER_SEC << 32, 484 vcpu->arch.count_hz); 485 vcpu->arch.count_dyn_bias = 0; 486 487 /* Starting at 0 */ 488 kvm_mips_write_count(vcpu, 0); 489 } 490 491 /** 492 * kvm_mips_set_count_hz() - Update the frequency of the timer. 493 * @vcpu: Virtual CPU. 494 * @count_hz: Frequency of CP0_Count timer in Hz. 495 * 496 * Change the frequency of the CP0_Count timer. This is done atomically so that 497 * CP0_Count is continuous and no timer interrupt is lost. 498 * 499 * Returns: -EINVAL if @count_hz is out of range. 500 * 0 on success. 501 */ 502 int kvm_mips_set_count_hz(struct kvm_vcpu *vcpu, s64 count_hz) 503 { 504 struct mips_coproc *cop0 = vcpu->arch.cop0; 505 int dc; 506 ktime_t now; 507 u32 count; 508 509 /* ensure the frequency is in a sensible range... */ 510 if (count_hz <= 0 || count_hz > NSEC_PER_SEC) 511 return -EINVAL; 512 /* ... and has actually changed */ 513 if (vcpu->arch.count_hz == count_hz) 514 return 0; 515 516 /* Safely freeze timer so we can keep it continuous */ 517 dc = kvm_mips_count_disabled(vcpu); 518 if (dc) { 519 now = kvm_mips_count_time(vcpu); 520 count = kvm_read_c0_guest_count(cop0); 521 } else { 522 now = kvm_mips_freeze_hrtimer(vcpu, &count); 523 } 524 525 /* Update the frequency */ 526 vcpu->arch.count_hz = count_hz; 527 vcpu->arch.count_period = div_u64((u64)NSEC_PER_SEC << 32, count_hz); 528 vcpu->arch.count_dyn_bias = 0; 529 530 /* Calculate adjusted bias so dynamic count is unchanged */ 531 vcpu->arch.count_bias = count - kvm_mips_ktime_to_count(vcpu, now); 532 533 /* Update and resume hrtimer */ 534 if (!dc) 535 kvm_mips_resume_hrtimer(vcpu, now, count); 536 return 0; 537 } 538 539 /** 540 * kvm_mips_write_compare() - Modify compare and update timer. 541 * @vcpu: Virtual CPU. 542 * @compare: New CP0_Compare value. 543 * 544 * Update CP0_Compare to a new value and update the timeout. 545 */ 546 void kvm_mips_write_compare(struct kvm_vcpu *vcpu, uint32_t compare) 547 { 548 struct mips_coproc *cop0 = vcpu->arch.cop0; 549 550 /* if unchanged, must just be an ack */ 551 if (kvm_read_c0_guest_compare(cop0) == compare) 552 return; 553 554 /* Update compare */ 555 kvm_write_c0_guest_compare(cop0, compare); 556 557 /* Update timeout if count enabled */ 558 if (!kvm_mips_count_disabled(vcpu)) 559 kvm_mips_update_hrtimer(vcpu); 560 } 561 562 /** 563 * kvm_mips_count_disable() - Disable count. 564 * @vcpu: Virtual CPU. 565 * 566 * Disable the CP0_Count timer. A timer interrupt on or before the final stop 567 * time will be handled but not after. 568 * 569 * Assumes CP0_Count was previously enabled but now Guest.CP0_Cause.DC or 570 * count_ctl.DC has been set (count disabled). 571 * 572 * Returns: The time that the timer was stopped. 573 */ 574 static ktime_t kvm_mips_count_disable(struct kvm_vcpu *vcpu) 575 { 576 struct mips_coproc *cop0 = vcpu->arch.cop0; 577 uint32_t count; 578 ktime_t now; 579 580 /* Stop hrtimer */ 581 hrtimer_cancel(&vcpu->arch.comparecount_timer); 582 583 /* Set the static count from the dynamic count, handling pending TI */ 584 now = ktime_get(); 585 count = kvm_mips_read_count_running(vcpu, now); 586 kvm_write_c0_guest_count(cop0, count); 587 588 return now; 589 } 590 591 /** 592 * kvm_mips_count_disable_cause() - Disable count using CP0_Cause.DC. 593 * @vcpu: Virtual CPU. 594 * 595 * Disable the CP0_Count timer and set CP0_Cause.DC. A timer interrupt on or 596 * before the final stop time will be handled if the timer isn't disabled by 597 * count_ctl.DC, but not after. 598 * 599 * Assumes CP0_Cause.DC is clear (count enabled). 600 */ 601 void kvm_mips_count_disable_cause(struct kvm_vcpu *vcpu) 602 { 603 struct mips_coproc *cop0 = vcpu->arch.cop0; 604 605 kvm_set_c0_guest_cause(cop0, CAUSEF_DC); 606 if (!(vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC)) 607 kvm_mips_count_disable(vcpu); 608 } 609 610 /** 611 * kvm_mips_count_enable_cause() - Enable count using CP0_Cause.DC. 612 * @vcpu: Virtual CPU. 613 * 614 * Enable the CP0_Count timer and clear CP0_Cause.DC. A timer interrupt after 615 * the start time will be handled if the timer isn't disabled by count_ctl.DC, 616 * potentially before even returning, so the caller should be careful with 617 * ordering of CP0_Cause modifications so as not to lose it. 618 * 619 * Assumes CP0_Cause.DC is set (count disabled). 620 */ 621 void kvm_mips_count_enable_cause(struct kvm_vcpu *vcpu) 622 { 623 struct mips_coproc *cop0 = vcpu->arch.cop0; 624 uint32_t count; 625 626 kvm_clear_c0_guest_cause(cop0, CAUSEF_DC); 627 628 /* 629 * Set the dynamic count to match the static count. 630 * This starts the hrtimer if count_ctl.DC allows it. 631 * Otherwise it conveniently updates the biases. 632 */ 633 count = kvm_read_c0_guest_count(cop0); 634 kvm_mips_write_count(vcpu, count); 635 } 636 637 /** 638 * kvm_mips_set_count_ctl() - Update the count control KVM register. 639 * @vcpu: Virtual CPU. 640 * @count_ctl: Count control register new value. 641 * 642 * Set the count control KVM register. The timer is updated accordingly. 643 * 644 * Returns: -EINVAL if reserved bits are set. 645 * 0 on success. 646 */ 647 int kvm_mips_set_count_ctl(struct kvm_vcpu *vcpu, s64 count_ctl) 648 { 649 struct mips_coproc *cop0 = vcpu->arch.cop0; 650 s64 changed = count_ctl ^ vcpu->arch.count_ctl; 651 s64 delta; 652 ktime_t expire, now; 653 uint32_t count, compare; 654 655 /* Only allow defined bits to be changed */ 656 if (changed & ~(s64)(KVM_REG_MIPS_COUNT_CTL_DC)) 657 return -EINVAL; 658 659 /* Apply new value */ 660 vcpu->arch.count_ctl = count_ctl; 661 662 /* Master CP0_Count disable */ 663 if (changed & KVM_REG_MIPS_COUNT_CTL_DC) { 664 /* Is CP0_Cause.DC already disabling CP0_Count? */ 665 if (kvm_read_c0_guest_cause(cop0) & CAUSEF_DC) { 666 if (count_ctl & KVM_REG_MIPS_COUNT_CTL_DC) 667 /* Just record the current time */ 668 vcpu->arch.count_resume = ktime_get(); 669 } else if (count_ctl & KVM_REG_MIPS_COUNT_CTL_DC) { 670 /* disable timer and record current time */ 671 vcpu->arch.count_resume = kvm_mips_count_disable(vcpu); 672 } else { 673 /* 674 * Calculate timeout relative to static count at resume 675 * time (wrap 0 to 2^32). 676 */ 677 count = kvm_read_c0_guest_count(cop0); 678 compare = kvm_read_c0_guest_compare(cop0); 679 delta = (u64)(uint32_t)(compare - count - 1) + 1; 680 delta = div_u64(delta * NSEC_PER_SEC, 681 vcpu->arch.count_hz); 682 expire = ktime_add_ns(vcpu->arch.count_resume, delta); 683 684 /* Handle pending interrupt */ 685 now = ktime_get(); 686 if (ktime_compare(now, expire) >= 0) 687 /* Nothing should be waiting on the timeout */ 688 kvm_mips_callbacks->queue_timer_int(vcpu); 689 690 /* Resume hrtimer without changing bias */ 691 count = kvm_mips_read_count_running(vcpu, now); 692 kvm_mips_resume_hrtimer(vcpu, now, count); 693 } 694 } 695 696 return 0; 697 } 698 699 /** 700 * kvm_mips_set_count_resume() - Update the count resume KVM register. 701 * @vcpu: Virtual CPU. 702 * @count_resume: Count resume register new value. 703 * 704 * Set the count resume KVM register. 705 * 706 * Returns: -EINVAL if out of valid range (0..now). 707 * 0 on success. 708 */ 709 int kvm_mips_set_count_resume(struct kvm_vcpu *vcpu, s64 count_resume) 710 { 711 /* 712 * It doesn't make sense for the resume time to be in the future, as it 713 * would be possible for the next interrupt to be more than a full 714 * period in the future. 715 */ 716 if (count_resume < 0 || count_resume > ktime_to_ns(ktime_get())) 717 return -EINVAL; 718 719 vcpu->arch.count_resume = ns_to_ktime(count_resume); 720 return 0; 721 } 722 723 /** 724 * kvm_mips_count_timeout() - Push timer forward on timeout. 725 * @vcpu: Virtual CPU. 726 * 727 * Handle an hrtimer event by push the hrtimer forward a period. 728 * 729 * Returns: The hrtimer_restart value to return to the hrtimer subsystem. 730 */ 731 enum hrtimer_restart kvm_mips_count_timeout(struct kvm_vcpu *vcpu) 732 { 733 /* Add the Count period to the current expiry time */ 734 hrtimer_add_expires_ns(&vcpu->arch.comparecount_timer, 735 vcpu->arch.count_period); 736 return HRTIMER_RESTART; 737 } 738 739 enum emulation_result kvm_mips_emul_eret(struct kvm_vcpu *vcpu) 740 { 741 struct mips_coproc *cop0 = vcpu->arch.cop0; 742 enum emulation_result er = EMULATE_DONE; 743 744 if (kvm_read_c0_guest_status(cop0) & ST0_EXL) { 745 kvm_debug("[%#lx] ERET to %#lx\n", vcpu->arch.pc, 746 kvm_read_c0_guest_epc(cop0)); 747 kvm_clear_c0_guest_status(cop0, ST0_EXL); 748 vcpu->arch.pc = kvm_read_c0_guest_epc(cop0); 749 750 } else if (kvm_read_c0_guest_status(cop0) & ST0_ERL) { 751 kvm_clear_c0_guest_status(cop0, ST0_ERL); 752 vcpu->arch.pc = kvm_read_c0_guest_errorepc(cop0); 753 } else { 754 kvm_err("[%#lx] ERET when MIPS_SR_EXL|MIPS_SR_ERL == 0\n", 755 vcpu->arch.pc); 756 er = EMULATE_FAIL; 757 } 758 759 return er; 760 } 761 762 enum emulation_result kvm_mips_emul_wait(struct kvm_vcpu *vcpu) 763 { 764 kvm_debug("[%#lx] !!!WAIT!!! (%#lx)\n", vcpu->arch.pc, 765 vcpu->arch.pending_exceptions); 766 767 ++vcpu->stat.wait_exits; 768 trace_kvm_exit(vcpu, WAIT_EXITS); 769 if (!vcpu->arch.pending_exceptions) { 770 vcpu->arch.wait = 1; 771 kvm_vcpu_block(vcpu); 772 773 /* 774 * We we are runnable, then definitely go off to user space to 775 * check if any I/O interrupts are pending. 776 */ 777 if (kvm_check_request(KVM_REQ_UNHALT, vcpu)) { 778 clear_bit(KVM_REQ_UNHALT, &vcpu->requests); 779 vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN; 780 } 781 } 782 783 return EMULATE_DONE; 784 } 785 786 /* 787 * XXXKYMA: Linux doesn't seem to use TLBR, return EMULATE_FAIL for now so that 788 * we can catch this, if things ever change 789 */ 790 enum emulation_result kvm_mips_emul_tlbr(struct kvm_vcpu *vcpu) 791 { 792 struct mips_coproc *cop0 = vcpu->arch.cop0; 793 uint32_t pc = vcpu->arch.pc; 794 795 kvm_err("[%#x] COP0_TLBR [%ld]\n", pc, kvm_read_c0_guest_index(cop0)); 796 return EMULATE_FAIL; 797 } 798 799 /* Write Guest TLB Entry @ Index */ 800 enum emulation_result kvm_mips_emul_tlbwi(struct kvm_vcpu *vcpu) 801 { 802 struct mips_coproc *cop0 = vcpu->arch.cop0; 803 int index = kvm_read_c0_guest_index(cop0); 804 struct kvm_mips_tlb *tlb = NULL; 805 uint32_t pc = vcpu->arch.pc; 806 807 if (index < 0 || index >= KVM_MIPS_GUEST_TLB_SIZE) { 808 kvm_debug("%s: illegal index: %d\n", __func__, index); 809 kvm_debug("[%#x] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n", 810 pc, index, kvm_read_c0_guest_entryhi(cop0), 811 kvm_read_c0_guest_entrylo0(cop0), 812 kvm_read_c0_guest_entrylo1(cop0), 813 kvm_read_c0_guest_pagemask(cop0)); 814 index = (index & ~0x80000000) % KVM_MIPS_GUEST_TLB_SIZE; 815 } 816 817 tlb = &vcpu->arch.guest_tlb[index]; 818 /* 819 * Probe the shadow host TLB for the entry being overwritten, if one 820 * matches, invalidate it 821 */ 822 kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi); 823 824 tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0); 825 tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0); 826 tlb->tlb_lo0 = kvm_read_c0_guest_entrylo0(cop0); 827 tlb->tlb_lo1 = kvm_read_c0_guest_entrylo1(cop0); 828 829 kvm_debug("[%#x] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n", 830 pc, index, kvm_read_c0_guest_entryhi(cop0), 831 kvm_read_c0_guest_entrylo0(cop0), 832 kvm_read_c0_guest_entrylo1(cop0), 833 kvm_read_c0_guest_pagemask(cop0)); 834 835 return EMULATE_DONE; 836 } 837 838 /* Write Guest TLB Entry @ Random Index */ 839 enum emulation_result kvm_mips_emul_tlbwr(struct kvm_vcpu *vcpu) 840 { 841 struct mips_coproc *cop0 = vcpu->arch.cop0; 842 struct kvm_mips_tlb *tlb = NULL; 843 uint32_t pc = vcpu->arch.pc; 844 int index; 845 846 get_random_bytes(&index, sizeof(index)); 847 index &= (KVM_MIPS_GUEST_TLB_SIZE - 1); 848 849 tlb = &vcpu->arch.guest_tlb[index]; 850 851 /* 852 * Probe the shadow host TLB for the entry being overwritten, if one 853 * matches, invalidate it 854 */ 855 kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi); 856 857 tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0); 858 tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0); 859 tlb->tlb_lo0 = kvm_read_c0_guest_entrylo0(cop0); 860 tlb->tlb_lo1 = kvm_read_c0_guest_entrylo1(cop0); 861 862 kvm_debug("[%#x] COP0_TLBWR[%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx)\n", 863 pc, index, kvm_read_c0_guest_entryhi(cop0), 864 kvm_read_c0_guest_entrylo0(cop0), 865 kvm_read_c0_guest_entrylo1(cop0)); 866 867 return EMULATE_DONE; 868 } 869 870 enum emulation_result kvm_mips_emul_tlbp(struct kvm_vcpu *vcpu) 871 { 872 struct mips_coproc *cop0 = vcpu->arch.cop0; 873 long entryhi = kvm_read_c0_guest_entryhi(cop0); 874 uint32_t pc = vcpu->arch.pc; 875 int index = -1; 876 877 index = kvm_mips_guest_tlb_lookup(vcpu, entryhi); 878 879 kvm_write_c0_guest_index(cop0, index); 880 881 kvm_debug("[%#x] COP0_TLBP (entryhi: %#lx), index: %d\n", pc, entryhi, 882 index); 883 884 return EMULATE_DONE; 885 } 886 887 /** 888 * kvm_mips_config1_wrmask() - Find mask of writable bits in guest Config1 889 * @vcpu: Virtual CPU. 890 * 891 * Finds the mask of bits which are writable in the guest's Config1 CP0 892 * register, by userland (currently read-only to the guest). 893 */ 894 unsigned int kvm_mips_config1_wrmask(struct kvm_vcpu *vcpu) 895 { 896 unsigned int mask = 0; 897 898 /* Permit FPU to be present if FPU is supported */ 899 if (kvm_mips_guest_can_have_fpu(&vcpu->arch)) 900 mask |= MIPS_CONF1_FP; 901 902 return mask; 903 } 904 905 /** 906 * kvm_mips_config3_wrmask() - Find mask of writable bits in guest Config3 907 * @vcpu: Virtual CPU. 908 * 909 * Finds the mask of bits which are writable in the guest's Config3 CP0 910 * register, by userland (currently read-only to the guest). 911 */ 912 unsigned int kvm_mips_config3_wrmask(struct kvm_vcpu *vcpu) 913 { 914 /* Config4 is optional */ 915 unsigned int mask = MIPS_CONF_M; 916 917 /* Permit MSA to be present if MSA is supported */ 918 if (kvm_mips_guest_can_have_msa(&vcpu->arch)) 919 mask |= MIPS_CONF3_MSA; 920 921 return mask; 922 } 923 924 /** 925 * kvm_mips_config4_wrmask() - Find mask of writable bits in guest Config4 926 * @vcpu: Virtual CPU. 927 * 928 * Finds the mask of bits which are writable in the guest's Config4 CP0 929 * register, by userland (currently read-only to the guest). 930 */ 931 unsigned int kvm_mips_config4_wrmask(struct kvm_vcpu *vcpu) 932 { 933 /* Config5 is optional */ 934 return MIPS_CONF_M; 935 } 936 937 /** 938 * kvm_mips_config5_wrmask() - Find mask of writable bits in guest Config5 939 * @vcpu: Virtual CPU. 940 * 941 * Finds the mask of bits which are writable in the guest's Config5 CP0 942 * register, by the guest itself. 943 */ 944 unsigned int kvm_mips_config5_wrmask(struct kvm_vcpu *vcpu) 945 { 946 unsigned int mask = 0; 947 948 /* Permit MSAEn changes if MSA supported and enabled */ 949 if (kvm_mips_guest_has_msa(&vcpu->arch)) 950 mask |= MIPS_CONF5_MSAEN; 951 952 /* 953 * Permit guest FPU mode changes if FPU is enabled and the relevant 954 * feature exists according to FIR register. 955 */ 956 if (kvm_mips_guest_has_fpu(&vcpu->arch)) { 957 if (cpu_has_fre) 958 mask |= MIPS_CONF5_FRE; 959 /* We don't support UFR or UFE */ 960 } 961 962 return mask; 963 } 964 965 enum emulation_result kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc, 966 uint32_t cause, struct kvm_run *run, 967 struct kvm_vcpu *vcpu) 968 { 969 struct mips_coproc *cop0 = vcpu->arch.cop0; 970 enum emulation_result er = EMULATE_DONE; 971 int32_t rt, rd, copz, sel, co_bit, op; 972 uint32_t pc = vcpu->arch.pc; 973 unsigned long curr_pc; 974 975 /* 976 * Update PC and hold onto current PC in case there is 977 * an error and we want to rollback the PC 978 */ 979 curr_pc = vcpu->arch.pc; 980 er = update_pc(vcpu, cause); 981 if (er == EMULATE_FAIL) 982 return er; 983 984 copz = (inst >> 21) & 0x1f; 985 rt = (inst >> 16) & 0x1f; 986 rd = (inst >> 11) & 0x1f; 987 sel = inst & 0x7; 988 co_bit = (inst >> 25) & 1; 989 990 if (co_bit) { 991 op = (inst) & 0xff; 992 993 switch (op) { 994 case tlbr_op: /* Read indexed TLB entry */ 995 er = kvm_mips_emul_tlbr(vcpu); 996 break; 997 case tlbwi_op: /* Write indexed */ 998 er = kvm_mips_emul_tlbwi(vcpu); 999 break; 1000 case tlbwr_op: /* Write random */ 1001 er = kvm_mips_emul_tlbwr(vcpu); 1002 break; 1003 case tlbp_op: /* TLB Probe */ 1004 er = kvm_mips_emul_tlbp(vcpu); 1005 break; 1006 case rfe_op: 1007 kvm_err("!!!COP0_RFE!!!\n"); 1008 break; 1009 case eret_op: 1010 er = kvm_mips_emul_eret(vcpu); 1011 goto dont_update_pc; 1012 break; 1013 case wait_op: 1014 er = kvm_mips_emul_wait(vcpu); 1015 break; 1016 } 1017 } else { 1018 switch (copz) { 1019 case mfc_op: 1020 #ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS 1021 cop0->stat[rd][sel]++; 1022 #endif 1023 /* Get reg */ 1024 if ((rd == MIPS_CP0_COUNT) && (sel == 0)) { 1025 vcpu->arch.gprs[rt] = kvm_mips_read_count(vcpu); 1026 } else if ((rd == MIPS_CP0_ERRCTL) && (sel == 0)) { 1027 vcpu->arch.gprs[rt] = 0x0; 1028 #ifdef CONFIG_KVM_MIPS_DYN_TRANS 1029 kvm_mips_trans_mfc0(inst, opc, vcpu); 1030 #endif 1031 } else { 1032 vcpu->arch.gprs[rt] = cop0->reg[rd][sel]; 1033 1034 #ifdef CONFIG_KVM_MIPS_DYN_TRANS 1035 kvm_mips_trans_mfc0(inst, opc, vcpu); 1036 #endif 1037 } 1038 1039 kvm_debug 1040 ("[%#x] MFCz[%d][%d], vcpu->arch.gprs[%d]: %#lx\n", 1041 pc, rd, sel, rt, vcpu->arch.gprs[rt]); 1042 1043 break; 1044 1045 case dmfc_op: 1046 vcpu->arch.gprs[rt] = cop0->reg[rd][sel]; 1047 break; 1048 1049 case mtc_op: 1050 #ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS 1051 cop0->stat[rd][sel]++; 1052 #endif 1053 if ((rd == MIPS_CP0_TLB_INDEX) 1054 && (vcpu->arch.gprs[rt] >= 1055 KVM_MIPS_GUEST_TLB_SIZE)) { 1056 kvm_err("Invalid TLB Index: %ld", 1057 vcpu->arch.gprs[rt]); 1058 er = EMULATE_FAIL; 1059 break; 1060 } 1061 #define C0_EBASE_CORE_MASK 0xff 1062 if ((rd == MIPS_CP0_PRID) && (sel == 1)) { 1063 /* Preserve CORE number */ 1064 kvm_change_c0_guest_ebase(cop0, 1065 ~(C0_EBASE_CORE_MASK), 1066 vcpu->arch.gprs[rt]); 1067 kvm_err("MTCz, cop0->reg[EBASE]: %#lx\n", 1068 kvm_read_c0_guest_ebase(cop0)); 1069 } else if (rd == MIPS_CP0_TLB_HI && sel == 0) { 1070 uint32_t nasid = 1071 vcpu->arch.gprs[rt] & ASID_MASK; 1072 if ((KSEGX(vcpu->arch.gprs[rt]) != CKSEG0) && 1073 ((kvm_read_c0_guest_entryhi(cop0) & 1074 ASID_MASK) != nasid)) { 1075 kvm_debug("MTCz, change ASID from %#lx to %#lx\n", 1076 kvm_read_c0_guest_entryhi(cop0) 1077 & ASID_MASK, 1078 vcpu->arch.gprs[rt] 1079 & ASID_MASK); 1080 1081 /* Blow away the shadow host TLBs */ 1082 kvm_mips_flush_host_tlb(1); 1083 } 1084 kvm_write_c0_guest_entryhi(cop0, 1085 vcpu->arch.gprs[rt]); 1086 } 1087 /* Are we writing to COUNT */ 1088 else if ((rd == MIPS_CP0_COUNT) && (sel == 0)) { 1089 kvm_mips_write_count(vcpu, vcpu->arch.gprs[rt]); 1090 goto done; 1091 } else if ((rd == MIPS_CP0_COMPARE) && (sel == 0)) { 1092 kvm_debug("[%#x] MTCz, COMPARE %#lx <- %#lx\n", 1093 pc, kvm_read_c0_guest_compare(cop0), 1094 vcpu->arch.gprs[rt]); 1095 1096 /* If we are writing to COMPARE */ 1097 /* Clear pending timer interrupt, if any */ 1098 kvm_mips_callbacks->dequeue_timer_int(vcpu); 1099 kvm_mips_write_compare(vcpu, 1100 vcpu->arch.gprs[rt]); 1101 } else if ((rd == MIPS_CP0_STATUS) && (sel == 0)) { 1102 unsigned int old_val, val, change; 1103 1104 old_val = kvm_read_c0_guest_status(cop0); 1105 val = vcpu->arch.gprs[rt]; 1106 change = val ^ old_val; 1107 1108 /* Make sure that the NMI bit is never set */ 1109 val &= ~ST0_NMI; 1110 1111 /* 1112 * Don't allow CU1 or FR to be set unless FPU 1113 * capability enabled and exists in guest 1114 * configuration. 1115 */ 1116 if (!kvm_mips_guest_has_fpu(&vcpu->arch)) 1117 val &= ~(ST0_CU1 | ST0_FR); 1118 1119 /* 1120 * Also don't allow FR to be set if host doesn't 1121 * support it. 1122 */ 1123 if (!(current_cpu_data.fpu_id & MIPS_FPIR_F64)) 1124 val &= ~ST0_FR; 1125 1126 1127 /* Handle changes in FPU mode */ 1128 preempt_disable(); 1129 1130 /* 1131 * FPU and Vector register state is made 1132 * UNPREDICTABLE by a change of FR, so don't 1133 * even bother saving it. 1134 */ 1135 if (change & ST0_FR) 1136 kvm_drop_fpu(vcpu); 1137 1138 /* 1139 * If MSA state is already live, it is undefined 1140 * how it interacts with FR=0 FPU state, and we 1141 * don't want to hit reserved instruction 1142 * exceptions trying to save the MSA state later 1143 * when CU=1 && FR=1, so play it safe and save 1144 * it first. 1145 */ 1146 if (change & ST0_CU1 && !(val & ST0_FR) && 1147 vcpu->arch.fpu_inuse & KVM_MIPS_FPU_MSA) 1148 kvm_lose_fpu(vcpu); 1149 1150 /* 1151 * Propagate CU1 (FPU enable) changes 1152 * immediately if the FPU context is already 1153 * loaded. When disabling we leave the context 1154 * loaded so it can be quickly enabled again in 1155 * the near future. 1156 */ 1157 if (change & ST0_CU1 && 1158 vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU) 1159 change_c0_status(ST0_CU1, val); 1160 1161 preempt_enable(); 1162 1163 kvm_write_c0_guest_status(cop0, val); 1164 1165 #ifdef CONFIG_KVM_MIPS_DYN_TRANS 1166 /* 1167 * If FPU present, we need CU1/FR bits to take 1168 * effect fairly soon. 1169 */ 1170 if (!kvm_mips_guest_has_fpu(&vcpu->arch)) 1171 kvm_mips_trans_mtc0(inst, opc, vcpu); 1172 #endif 1173 } else if ((rd == MIPS_CP0_CONFIG) && (sel == 5)) { 1174 unsigned int old_val, val, change, wrmask; 1175 1176 old_val = kvm_read_c0_guest_config5(cop0); 1177 val = vcpu->arch.gprs[rt]; 1178 1179 /* Only a few bits are writable in Config5 */ 1180 wrmask = kvm_mips_config5_wrmask(vcpu); 1181 change = (val ^ old_val) & wrmask; 1182 val = old_val ^ change; 1183 1184 1185 /* Handle changes in FPU/MSA modes */ 1186 preempt_disable(); 1187 1188 /* 1189 * Propagate FRE changes immediately if the FPU 1190 * context is already loaded. 1191 */ 1192 if (change & MIPS_CONF5_FRE && 1193 vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU) 1194 change_c0_config5(MIPS_CONF5_FRE, val); 1195 1196 /* 1197 * Propagate MSAEn changes immediately if the 1198 * MSA context is already loaded. When disabling 1199 * we leave the context loaded so it can be 1200 * quickly enabled again in the near future. 1201 */ 1202 if (change & MIPS_CONF5_MSAEN && 1203 vcpu->arch.fpu_inuse & KVM_MIPS_FPU_MSA) 1204 change_c0_config5(MIPS_CONF5_MSAEN, 1205 val); 1206 1207 preempt_enable(); 1208 1209 kvm_write_c0_guest_config5(cop0, val); 1210 } else if ((rd == MIPS_CP0_CAUSE) && (sel == 0)) { 1211 uint32_t old_cause, new_cause; 1212 1213 old_cause = kvm_read_c0_guest_cause(cop0); 1214 new_cause = vcpu->arch.gprs[rt]; 1215 /* Update R/W bits */ 1216 kvm_change_c0_guest_cause(cop0, 0x08800300, 1217 new_cause); 1218 /* DC bit enabling/disabling timer? */ 1219 if ((old_cause ^ new_cause) & CAUSEF_DC) { 1220 if (new_cause & CAUSEF_DC) 1221 kvm_mips_count_disable_cause(vcpu); 1222 else 1223 kvm_mips_count_enable_cause(vcpu); 1224 } 1225 } else { 1226 cop0->reg[rd][sel] = vcpu->arch.gprs[rt]; 1227 #ifdef CONFIG_KVM_MIPS_DYN_TRANS 1228 kvm_mips_trans_mtc0(inst, opc, vcpu); 1229 #endif 1230 } 1231 1232 kvm_debug("[%#x] MTCz, cop0->reg[%d][%d]: %#lx\n", pc, 1233 rd, sel, cop0->reg[rd][sel]); 1234 break; 1235 1236 case dmtc_op: 1237 kvm_err("!!!!!!![%#lx]dmtc_op: rt: %d, rd: %d, sel: %d!!!!!!\n", 1238 vcpu->arch.pc, rt, rd, sel); 1239 er = EMULATE_FAIL; 1240 break; 1241 1242 case mfmcz_op: 1243 #ifdef KVM_MIPS_DEBUG_COP0_COUNTERS 1244 cop0->stat[MIPS_CP0_STATUS][0]++; 1245 #endif 1246 if (rt != 0) { 1247 vcpu->arch.gprs[rt] = 1248 kvm_read_c0_guest_status(cop0); 1249 } 1250 /* EI */ 1251 if (inst & 0x20) { 1252 kvm_debug("[%#lx] mfmcz_op: EI\n", 1253 vcpu->arch.pc); 1254 kvm_set_c0_guest_status(cop0, ST0_IE); 1255 } else { 1256 kvm_debug("[%#lx] mfmcz_op: DI\n", 1257 vcpu->arch.pc); 1258 kvm_clear_c0_guest_status(cop0, ST0_IE); 1259 } 1260 1261 break; 1262 1263 case wrpgpr_op: 1264 { 1265 uint32_t css = 1266 cop0->reg[MIPS_CP0_STATUS][2] & 0xf; 1267 uint32_t pss = 1268 (cop0->reg[MIPS_CP0_STATUS][2] >> 6) & 0xf; 1269 /* 1270 * We don't support any shadow register sets, so 1271 * SRSCtl[PSS] == SRSCtl[CSS] = 0 1272 */ 1273 if (css || pss) { 1274 er = EMULATE_FAIL; 1275 break; 1276 } 1277 kvm_debug("WRPGPR[%d][%d] = %#lx\n", pss, rd, 1278 vcpu->arch.gprs[rt]); 1279 vcpu->arch.gprs[rd] = vcpu->arch.gprs[rt]; 1280 } 1281 break; 1282 default: 1283 kvm_err("[%#lx]MachEmulateCP0: unsupported COP0, copz: 0x%x\n", 1284 vcpu->arch.pc, copz); 1285 er = EMULATE_FAIL; 1286 break; 1287 } 1288 } 1289 1290 done: 1291 /* Rollback PC only if emulation was unsuccessful */ 1292 if (er == EMULATE_FAIL) 1293 vcpu->arch.pc = curr_pc; 1294 1295 dont_update_pc: 1296 /* 1297 * This is for special instructions whose emulation 1298 * updates the PC, so do not overwrite the PC under 1299 * any circumstances 1300 */ 1301 1302 return er; 1303 } 1304 1305 enum emulation_result kvm_mips_emulate_store(uint32_t inst, uint32_t cause, 1306 struct kvm_run *run, 1307 struct kvm_vcpu *vcpu) 1308 { 1309 enum emulation_result er = EMULATE_DO_MMIO; 1310 int32_t op, base, rt, offset; 1311 uint32_t bytes; 1312 void *data = run->mmio.data; 1313 unsigned long curr_pc; 1314 1315 /* 1316 * Update PC and hold onto current PC in case there is 1317 * an error and we want to rollback the PC 1318 */ 1319 curr_pc = vcpu->arch.pc; 1320 er = update_pc(vcpu, cause); 1321 if (er == EMULATE_FAIL) 1322 return er; 1323 1324 rt = (inst >> 16) & 0x1f; 1325 base = (inst >> 21) & 0x1f; 1326 offset = inst & 0xffff; 1327 op = (inst >> 26) & 0x3f; 1328 1329 switch (op) { 1330 case sb_op: 1331 bytes = 1; 1332 if (bytes > sizeof(run->mmio.data)) { 1333 kvm_err("%s: bad MMIO length: %d\n", __func__, 1334 run->mmio.len); 1335 } 1336 run->mmio.phys_addr = 1337 kvm_mips_callbacks->gva_to_gpa(vcpu->arch. 1338 host_cp0_badvaddr); 1339 if (run->mmio.phys_addr == KVM_INVALID_ADDR) { 1340 er = EMULATE_FAIL; 1341 break; 1342 } 1343 run->mmio.len = bytes; 1344 run->mmio.is_write = 1; 1345 vcpu->mmio_needed = 1; 1346 vcpu->mmio_is_write = 1; 1347 *(u8 *) data = vcpu->arch.gprs[rt]; 1348 kvm_debug("OP_SB: eaddr: %#lx, gpr: %#lx, data: %#x\n", 1349 vcpu->arch.host_cp0_badvaddr, vcpu->arch.gprs[rt], 1350 *(uint8_t *) data); 1351 1352 break; 1353 1354 case sw_op: 1355 bytes = 4; 1356 if (bytes > sizeof(run->mmio.data)) { 1357 kvm_err("%s: bad MMIO length: %d\n", __func__, 1358 run->mmio.len); 1359 } 1360 run->mmio.phys_addr = 1361 kvm_mips_callbacks->gva_to_gpa(vcpu->arch. 1362 host_cp0_badvaddr); 1363 if (run->mmio.phys_addr == KVM_INVALID_ADDR) { 1364 er = EMULATE_FAIL; 1365 break; 1366 } 1367 1368 run->mmio.len = bytes; 1369 run->mmio.is_write = 1; 1370 vcpu->mmio_needed = 1; 1371 vcpu->mmio_is_write = 1; 1372 *(uint32_t *) data = vcpu->arch.gprs[rt]; 1373 1374 kvm_debug("[%#lx] OP_SW: eaddr: %#lx, gpr: %#lx, data: %#x\n", 1375 vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr, 1376 vcpu->arch.gprs[rt], *(uint32_t *) data); 1377 break; 1378 1379 case sh_op: 1380 bytes = 2; 1381 if (bytes > sizeof(run->mmio.data)) { 1382 kvm_err("%s: bad MMIO length: %d\n", __func__, 1383 run->mmio.len); 1384 } 1385 run->mmio.phys_addr = 1386 kvm_mips_callbacks->gva_to_gpa(vcpu->arch. 1387 host_cp0_badvaddr); 1388 if (run->mmio.phys_addr == KVM_INVALID_ADDR) { 1389 er = EMULATE_FAIL; 1390 break; 1391 } 1392 1393 run->mmio.len = bytes; 1394 run->mmio.is_write = 1; 1395 vcpu->mmio_needed = 1; 1396 vcpu->mmio_is_write = 1; 1397 *(uint16_t *) data = vcpu->arch.gprs[rt]; 1398 1399 kvm_debug("[%#lx] OP_SH: eaddr: %#lx, gpr: %#lx, data: %#x\n", 1400 vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr, 1401 vcpu->arch.gprs[rt], *(uint32_t *) data); 1402 break; 1403 1404 default: 1405 kvm_err("Store not yet supported"); 1406 er = EMULATE_FAIL; 1407 break; 1408 } 1409 1410 /* Rollback PC if emulation was unsuccessful */ 1411 if (er == EMULATE_FAIL) 1412 vcpu->arch.pc = curr_pc; 1413 1414 return er; 1415 } 1416 1417 enum emulation_result kvm_mips_emulate_load(uint32_t inst, uint32_t cause, 1418 struct kvm_run *run, 1419 struct kvm_vcpu *vcpu) 1420 { 1421 enum emulation_result er = EMULATE_DO_MMIO; 1422 int32_t op, base, rt, offset; 1423 uint32_t bytes; 1424 1425 rt = (inst >> 16) & 0x1f; 1426 base = (inst >> 21) & 0x1f; 1427 offset = inst & 0xffff; 1428 op = (inst >> 26) & 0x3f; 1429 1430 vcpu->arch.pending_load_cause = cause; 1431 vcpu->arch.io_gpr = rt; 1432 1433 switch (op) { 1434 case lw_op: 1435 bytes = 4; 1436 if (bytes > sizeof(run->mmio.data)) { 1437 kvm_err("%s: bad MMIO length: %d\n", __func__, 1438 run->mmio.len); 1439 er = EMULATE_FAIL; 1440 break; 1441 } 1442 run->mmio.phys_addr = 1443 kvm_mips_callbacks->gva_to_gpa(vcpu->arch. 1444 host_cp0_badvaddr); 1445 if (run->mmio.phys_addr == KVM_INVALID_ADDR) { 1446 er = EMULATE_FAIL; 1447 break; 1448 } 1449 1450 run->mmio.len = bytes; 1451 run->mmio.is_write = 0; 1452 vcpu->mmio_needed = 1; 1453 vcpu->mmio_is_write = 0; 1454 break; 1455 1456 case lh_op: 1457 case lhu_op: 1458 bytes = 2; 1459 if (bytes > sizeof(run->mmio.data)) { 1460 kvm_err("%s: bad MMIO length: %d\n", __func__, 1461 run->mmio.len); 1462 er = EMULATE_FAIL; 1463 break; 1464 } 1465 run->mmio.phys_addr = 1466 kvm_mips_callbacks->gva_to_gpa(vcpu->arch. 1467 host_cp0_badvaddr); 1468 if (run->mmio.phys_addr == KVM_INVALID_ADDR) { 1469 er = EMULATE_FAIL; 1470 break; 1471 } 1472 1473 run->mmio.len = bytes; 1474 run->mmio.is_write = 0; 1475 vcpu->mmio_needed = 1; 1476 vcpu->mmio_is_write = 0; 1477 1478 if (op == lh_op) 1479 vcpu->mmio_needed = 2; 1480 else 1481 vcpu->mmio_needed = 1; 1482 1483 break; 1484 1485 case lbu_op: 1486 case lb_op: 1487 bytes = 1; 1488 if (bytes > sizeof(run->mmio.data)) { 1489 kvm_err("%s: bad MMIO length: %d\n", __func__, 1490 run->mmio.len); 1491 er = EMULATE_FAIL; 1492 break; 1493 } 1494 run->mmio.phys_addr = 1495 kvm_mips_callbacks->gva_to_gpa(vcpu->arch. 1496 host_cp0_badvaddr); 1497 if (run->mmio.phys_addr == KVM_INVALID_ADDR) { 1498 er = EMULATE_FAIL; 1499 break; 1500 } 1501 1502 run->mmio.len = bytes; 1503 run->mmio.is_write = 0; 1504 vcpu->mmio_is_write = 0; 1505 1506 if (op == lb_op) 1507 vcpu->mmio_needed = 2; 1508 else 1509 vcpu->mmio_needed = 1; 1510 1511 break; 1512 1513 default: 1514 kvm_err("Load not yet supported"); 1515 er = EMULATE_FAIL; 1516 break; 1517 } 1518 1519 return er; 1520 } 1521 1522 int kvm_mips_sync_icache(unsigned long va, struct kvm_vcpu *vcpu) 1523 { 1524 unsigned long offset = (va & ~PAGE_MASK); 1525 struct kvm *kvm = vcpu->kvm; 1526 unsigned long pa; 1527 gfn_t gfn; 1528 pfn_t pfn; 1529 1530 gfn = va >> PAGE_SHIFT; 1531 1532 if (gfn >= kvm->arch.guest_pmap_npages) { 1533 kvm_err("%s: Invalid gfn: %#llx\n", __func__, gfn); 1534 kvm_mips_dump_host_tlbs(); 1535 kvm_arch_vcpu_dump_regs(vcpu); 1536 return -1; 1537 } 1538 pfn = kvm->arch.guest_pmap[gfn]; 1539 pa = (pfn << PAGE_SHIFT) | offset; 1540 1541 kvm_debug("%s: va: %#lx, unmapped: %#x\n", __func__, va, 1542 CKSEG0ADDR(pa)); 1543 1544 local_flush_icache_range(CKSEG0ADDR(pa), 32); 1545 return 0; 1546 } 1547 1548 #define MIPS_CACHE_OP_INDEX_INV 0x0 1549 #define MIPS_CACHE_OP_INDEX_LD_TAG 0x1 1550 #define MIPS_CACHE_OP_INDEX_ST_TAG 0x2 1551 #define MIPS_CACHE_OP_IMP 0x3 1552 #define MIPS_CACHE_OP_HIT_INV 0x4 1553 #define MIPS_CACHE_OP_FILL_WB_INV 0x5 1554 #define MIPS_CACHE_OP_HIT_HB 0x6 1555 #define MIPS_CACHE_OP_FETCH_LOCK 0x7 1556 1557 #define MIPS_CACHE_ICACHE 0x0 1558 #define MIPS_CACHE_DCACHE 0x1 1559 #define MIPS_CACHE_SEC 0x3 1560 1561 enum emulation_result kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc, 1562 uint32_t cause, 1563 struct kvm_run *run, 1564 struct kvm_vcpu *vcpu) 1565 { 1566 struct mips_coproc *cop0 = vcpu->arch.cop0; 1567 enum emulation_result er = EMULATE_DONE; 1568 int32_t offset, cache, op_inst, op, base; 1569 struct kvm_vcpu_arch *arch = &vcpu->arch; 1570 unsigned long va; 1571 unsigned long curr_pc; 1572 1573 /* 1574 * Update PC and hold onto current PC in case there is 1575 * an error and we want to rollback the PC 1576 */ 1577 curr_pc = vcpu->arch.pc; 1578 er = update_pc(vcpu, cause); 1579 if (er == EMULATE_FAIL) 1580 return er; 1581 1582 base = (inst >> 21) & 0x1f; 1583 op_inst = (inst >> 16) & 0x1f; 1584 offset = inst & 0xffff; 1585 cache = (inst >> 16) & 0x3; 1586 op = (inst >> 18) & 0x7; 1587 1588 va = arch->gprs[base] + offset; 1589 1590 kvm_debug("CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n", 1591 cache, op, base, arch->gprs[base], offset); 1592 1593 /* 1594 * Treat INDEX_INV as a nop, basically issued by Linux on startup to 1595 * invalidate the caches entirely by stepping through all the 1596 * ways/indexes 1597 */ 1598 if (op == MIPS_CACHE_OP_INDEX_INV) { 1599 kvm_debug("@ %#lx/%#lx CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n", 1600 vcpu->arch.pc, vcpu->arch.gprs[31], cache, op, base, 1601 arch->gprs[base], offset); 1602 1603 if (cache == MIPS_CACHE_DCACHE) 1604 r4k_blast_dcache(); 1605 else if (cache == MIPS_CACHE_ICACHE) 1606 r4k_blast_icache(); 1607 else { 1608 kvm_err("%s: unsupported CACHE INDEX operation\n", 1609 __func__); 1610 return EMULATE_FAIL; 1611 } 1612 1613 #ifdef CONFIG_KVM_MIPS_DYN_TRANS 1614 kvm_mips_trans_cache_index(inst, opc, vcpu); 1615 #endif 1616 goto done; 1617 } 1618 1619 preempt_disable(); 1620 if (KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG0) { 1621 if (kvm_mips_host_tlb_lookup(vcpu, va) < 0) 1622 kvm_mips_handle_kseg0_tlb_fault(va, vcpu); 1623 } else if ((KVM_GUEST_KSEGX(va) < KVM_GUEST_KSEG0) || 1624 KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG23) { 1625 int index; 1626 1627 /* If an entry already exists then skip */ 1628 if (kvm_mips_host_tlb_lookup(vcpu, va) >= 0) 1629 goto skip_fault; 1630 1631 /* 1632 * If address not in the guest TLB, then give the guest a fault, 1633 * the resulting handler will do the right thing 1634 */ 1635 index = kvm_mips_guest_tlb_lookup(vcpu, (va & VPN2_MASK) | 1636 (kvm_read_c0_guest_entryhi 1637 (cop0) & ASID_MASK)); 1638 1639 if (index < 0) { 1640 vcpu->arch.host_cp0_entryhi = (va & VPN2_MASK); 1641 vcpu->arch.host_cp0_badvaddr = va; 1642 er = kvm_mips_emulate_tlbmiss_ld(cause, NULL, run, 1643 vcpu); 1644 preempt_enable(); 1645 goto dont_update_pc; 1646 } else { 1647 struct kvm_mips_tlb *tlb = &vcpu->arch.guest_tlb[index]; 1648 /* 1649 * Check if the entry is valid, if not then setup a TLB 1650 * invalid exception to the guest 1651 */ 1652 if (!TLB_IS_VALID(*tlb, va)) { 1653 er = kvm_mips_emulate_tlbinv_ld(cause, NULL, 1654 run, vcpu); 1655 preempt_enable(); 1656 goto dont_update_pc; 1657 } else { 1658 /* 1659 * We fault an entry from the guest tlb to the 1660 * shadow host TLB 1661 */ 1662 kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, 1663 NULL, 1664 NULL); 1665 } 1666 } 1667 } else { 1668 kvm_err("INVALID CACHE INDEX/ADDRESS (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n", 1669 cache, op, base, arch->gprs[base], offset); 1670 er = EMULATE_FAIL; 1671 preempt_enable(); 1672 goto dont_update_pc; 1673 1674 } 1675 1676 skip_fault: 1677 /* XXXKYMA: Only a subset of cache ops are supported, used by Linux */ 1678 if (cache == MIPS_CACHE_DCACHE 1679 && (op == MIPS_CACHE_OP_FILL_WB_INV 1680 || op == MIPS_CACHE_OP_HIT_INV)) { 1681 flush_dcache_line(va); 1682 1683 #ifdef CONFIG_KVM_MIPS_DYN_TRANS 1684 /* 1685 * Replace the CACHE instruction, with a SYNCI, not the same, 1686 * but avoids a trap 1687 */ 1688 kvm_mips_trans_cache_va(inst, opc, vcpu); 1689 #endif 1690 } else if (op == MIPS_CACHE_OP_HIT_INV && cache == MIPS_CACHE_ICACHE) { 1691 flush_dcache_line(va); 1692 flush_icache_line(va); 1693 1694 #ifdef CONFIG_KVM_MIPS_DYN_TRANS 1695 /* Replace the CACHE instruction, with a SYNCI */ 1696 kvm_mips_trans_cache_va(inst, opc, vcpu); 1697 #endif 1698 } else { 1699 kvm_err("NO-OP CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n", 1700 cache, op, base, arch->gprs[base], offset); 1701 er = EMULATE_FAIL; 1702 preempt_enable(); 1703 goto dont_update_pc; 1704 } 1705 1706 preempt_enable(); 1707 1708 dont_update_pc: 1709 /* Rollback PC */ 1710 vcpu->arch.pc = curr_pc; 1711 done: 1712 return er; 1713 } 1714 1715 enum emulation_result kvm_mips_emulate_inst(unsigned long cause, uint32_t *opc, 1716 struct kvm_run *run, 1717 struct kvm_vcpu *vcpu) 1718 { 1719 enum emulation_result er = EMULATE_DONE; 1720 uint32_t inst; 1721 1722 /* Fetch the instruction. */ 1723 if (cause & CAUSEF_BD) 1724 opc += 1; 1725 1726 inst = kvm_get_inst(opc, vcpu); 1727 1728 switch (((union mips_instruction)inst).r_format.opcode) { 1729 case cop0_op: 1730 er = kvm_mips_emulate_CP0(inst, opc, cause, run, vcpu); 1731 break; 1732 case sb_op: 1733 case sh_op: 1734 case sw_op: 1735 er = kvm_mips_emulate_store(inst, cause, run, vcpu); 1736 break; 1737 case lb_op: 1738 case lbu_op: 1739 case lhu_op: 1740 case lh_op: 1741 case lw_op: 1742 er = kvm_mips_emulate_load(inst, cause, run, vcpu); 1743 break; 1744 1745 case cache_op: 1746 ++vcpu->stat.cache_exits; 1747 trace_kvm_exit(vcpu, CACHE_EXITS); 1748 er = kvm_mips_emulate_cache(inst, opc, cause, run, vcpu); 1749 break; 1750 1751 default: 1752 kvm_err("Instruction emulation not supported (%p/%#x)\n", opc, 1753 inst); 1754 kvm_arch_vcpu_dump_regs(vcpu); 1755 er = EMULATE_FAIL; 1756 break; 1757 } 1758 1759 return er; 1760 } 1761 1762 enum emulation_result kvm_mips_emulate_syscall(unsigned long cause, 1763 uint32_t *opc, 1764 struct kvm_run *run, 1765 struct kvm_vcpu *vcpu) 1766 { 1767 struct mips_coproc *cop0 = vcpu->arch.cop0; 1768 struct kvm_vcpu_arch *arch = &vcpu->arch; 1769 enum emulation_result er = EMULATE_DONE; 1770 1771 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { 1772 /* save old pc */ 1773 kvm_write_c0_guest_epc(cop0, arch->pc); 1774 kvm_set_c0_guest_status(cop0, ST0_EXL); 1775 1776 if (cause & CAUSEF_BD) 1777 kvm_set_c0_guest_cause(cop0, CAUSEF_BD); 1778 else 1779 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); 1780 1781 kvm_debug("Delivering SYSCALL @ pc %#lx\n", arch->pc); 1782 1783 kvm_change_c0_guest_cause(cop0, (0xff), 1784 (T_SYSCALL << CAUSEB_EXCCODE)); 1785 1786 /* Set PC to the exception entry point */ 1787 arch->pc = KVM_GUEST_KSEG0 + 0x180; 1788 1789 } else { 1790 kvm_err("Trying to deliver SYSCALL when EXL is already set\n"); 1791 er = EMULATE_FAIL; 1792 } 1793 1794 return er; 1795 } 1796 1797 enum emulation_result kvm_mips_emulate_tlbmiss_ld(unsigned long cause, 1798 uint32_t *opc, 1799 struct kvm_run *run, 1800 struct kvm_vcpu *vcpu) 1801 { 1802 struct mips_coproc *cop0 = vcpu->arch.cop0; 1803 struct kvm_vcpu_arch *arch = &vcpu->arch; 1804 unsigned long entryhi = (vcpu->arch. host_cp0_badvaddr & VPN2_MASK) | 1805 (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK); 1806 1807 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { 1808 /* save old pc */ 1809 kvm_write_c0_guest_epc(cop0, arch->pc); 1810 kvm_set_c0_guest_status(cop0, ST0_EXL); 1811 1812 if (cause & CAUSEF_BD) 1813 kvm_set_c0_guest_cause(cop0, CAUSEF_BD); 1814 else 1815 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); 1816 1817 kvm_debug("[EXL == 0] delivering TLB MISS @ pc %#lx\n", 1818 arch->pc); 1819 1820 /* set pc to the exception entry point */ 1821 arch->pc = KVM_GUEST_KSEG0 + 0x0; 1822 1823 } else { 1824 kvm_debug("[EXL == 1] delivering TLB MISS @ pc %#lx\n", 1825 arch->pc); 1826 1827 arch->pc = KVM_GUEST_KSEG0 + 0x180; 1828 } 1829 1830 kvm_change_c0_guest_cause(cop0, (0xff), 1831 (T_TLB_LD_MISS << CAUSEB_EXCCODE)); 1832 1833 /* setup badvaddr, context and entryhi registers for the guest */ 1834 kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr); 1835 /* XXXKYMA: is the context register used by linux??? */ 1836 kvm_write_c0_guest_entryhi(cop0, entryhi); 1837 /* Blow away the shadow host TLBs */ 1838 kvm_mips_flush_host_tlb(1); 1839 1840 return EMULATE_DONE; 1841 } 1842 1843 enum emulation_result kvm_mips_emulate_tlbinv_ld(unsigned long cause, 1844 uint32_t *opc, 1845 struct kvm_run *run, 1846 struct kvm_vcpu *vcpu) 1847 { 1848 struct mips_coproc *cop0 = vcpu->arch.cop0; 1849 struct kvm_vcpu_arch *arch = &vcpu->arch; 1850 unsigned long entryhi = 1851 (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) | 1852 (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK); 1853 1854 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { 1855 /* save old pc */ 1856 kvm_write_c0_guest_epc(cop0, arch->pc); 1857 kvm_set_c0_guest_status(cop0, ST0_EXL); 1858 1859 if (cause & CAUSEF_BD) 1860 kvm_set_c0_guest_cause(cop0, CAUSEF_BD); 1861 else 1862 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); 1863 1864 kvm_debug("[EXL == 0] delivering TLB INV @ pc %#lx\n", 1865 arch->pc); 1866 1867 /* set pc to the exception entry point */ 1868 arch->pc = KVM_GUEST_KSEG0 + 0x180; 1869 1870 } else { 1871 kvm_debug("[EXL == 1] delivering TLB MISS @ pc %#lx\n", 1872 arch->pc); 1873 arch->pc = KVM_GUEST_KSEG0 + 0x180; 1874 } 1875 1876 kvm_change_c0_guest_cause(cop0, (0xff), 1877 (T_TLB_LD_MISS << CAUSEB_EXCCODE)); 1878 1879 /* setup badvaddr, context and entryhi registers for the guest */ 1880 kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr); 1881 /* XXXKYMA: is the context register used by linux??? */ 1882 kvm_write_c0_guest_entryhi(cop0, entryhi); 1883 /* Blow away the shadow host TLBs */ 1884 kvm_mips_flush_host_tlb(1); 1885 1886 return EMULATE_DONE; 1887 } 1888 1889 enum emulation_result kvm_mips_emulate_tlbmiss_st(unsigned long cause, 1890 uint32_t *opc, 1891 struct kvm_run *run, 1892 struct kvm_vcpu *vcpu) 1893 { 1894 struct mips_coproc *cop0 = vcpu->arch.cop0; 1895 struct kvm_vcpu_arch *arch = &vcpu->arch; 1896 unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) | 1897 (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK); 1898 1899 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { 1900 /* save old pc */ 1901 kvm_write_c0_guest_epc(cop0, arch->pc); 1902 kvm_set_c0_guest_status(cop0, ST0_EXL); 1903 1904 if (cause & CAUSEF_BD) 1905 kvm_set_c0_guest_cause(cop0, CAUSEF_BD); 1906 else 1907 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); 1908 1909 kvm_debug("[EXL == 0] Delivering TLB MISS @ pc %#lx\n", 1910 arch->pc); 1911 1912 /* Set PC to the exception entry point */ 1913 arch->pc = KVM_GUEST_KSEG0 + 0x0; 1914 } else { 1915 kvm_debug("[EXL == 1] Delivering TLB MISS @ pc %#lx\n", 1916 arch->pc); 1917 arch->pc = KVM_GUEST_KSEG0 + 0x180; 1918 } 1919 1920 kvm_change_c0_guest_cause(cop0, (0xff), 1921 (T_TLB_ST_MISS << CAUSEB_EXCCODE)); 1922 1923 /* setup badvaddr, context and entryhi registers for the guest */ 1924 kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr); 1925 /* XXXKYMA: is the context register used by linux??? */ 1926 kvm_write_c0_guest_entryhi(cop0, entryhi); 1927 /* Blow away the shadow host TLBs */ 1928 kvm_mips_flush_host_tlb(1); 1929 1930 return EMULATE_DONE; 1931 } 1932 1933 enum emulation_result kvm_mips_emulate_tlbinv_st(unsigned long cause, 1934 uint32_t *opc, 1935 struct kvm_run *run, 1936 struct kvm_vcpu *vcpu) 1937 { 1938 struct mips_coproc *cop0 = vcpu->arch.cop0; 1939 struct kvm_vcpu_arch *arch = &vcpu->arch; 1940 unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) | 1941 (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK); 1942 1943 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { 1944 /* save old pc */ 1945 kvm_write_c0_guest_epc(cop0, arch->pc); 1946 kvm_set_c0_guest_status(cop0, ST0_EXL); 1947 1948 if (cause & CAUSEF_BD) 1949 kvm_set_c0_guest_cause(cop0, CAUSEF_BD); 1950 else 1951 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); 1952 1953 kvm_debug("[EXL == 0] Delivering TLB MISS @ pc %#lx\n", 1954 arch->pc); 1955 1956 /* Set PC to the exception entry point */ 1957 arch->pc = KVM_GUEST_KSEG0 + 0x180; 1958 } else { 1959 kvm_debug("[EXL == 1] Delivering TLB MISS @ pc %#lx\n", 1960 arch->pc); 1961 arch->pc = KVM_GUEST_KSEG0 + 0x180; 1962 } 1963 1964 kvm_change_c0_guest_cause(cop0, (0xff), 1965 (T_TLB_ST_MISS << CAUSEB_EXCCODE)); 1966 1967 /* setup badvaddr, context and entryhi registers for the guest */ 1968 kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr); 1969 /* XXXKYMA: is the context register used by linux??? */ 1970 kvm_write_c0_guest_entryhi(cop0, entryhi); 1971 /* Blow away the shadow host TLBs */ 1972 kvm_mips_flush_host_tlb(1); 1973 1974 return EMULATE_DONE; 1975 } 1976 1977 /* TLBMOD: store into address matching TLB with Dirty bit off */ 1978 enum emulation_result kvm_mips_handle_tlbmod(unsigned long cause, uint32_t *opc, 1979 struct kvm_run *run, 1980 struct kvm_vcpu *vcpu) 1981 { 1982 enum emulation_result er = EMULATE_DONE; 1983 #ifdef DEBUG 1984 struct mips_coproc *cop0 = vcpu->arch.cop0; 1985 unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) | 1986 (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK); 1987 int index; 1988 1989 /* If address not in the guest TLB, then we are in trouble */ 1990 index = kvm_mips_guest_tlb_lookup(vcpu, entryhi); 1991 if (index < 0) { 1992 /* XXXKYMA Invalidate and retry */ 1993 kvm_mips_host_tlb_inv(vcpu, vcpu->arch.host_cp0_badvaddr); 1994 kvm_err("%s: host got TLBMOD for %#lx but entry not present in Guest TLB\n", 1995 __func__, entryhi); 1996 kvm_mips_dump_guest_tlbs(vcpu); 1997 kvm_mips_dump_host_tlbs(); 1998 return EMULATE_FAIL; 1999 } 2000 #endif 2001 2002 er = kvm_mips_emulate_tlbmod(cause, opc, run, vcpu); 2003 return er; 2004 } 2005 2006 enum emulation_result kvm_mips_emulate_tlbmod(unsigned long cause, 2007 uint32_t *opc, 2008 struct kvm_run *run, 2009 struct kvm_vcpu *vcpu) 2010 { 2011 struct mips_coproc *cop0 = vcpu->arch.cop0; 2012 unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) | 2013 (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK); 2014 struct kvm_vcpu_arch *arch = &vcpu->arch; 2015 2016 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { 2017 /* save old pc */ 2018 kvm_write_c0_guest_epc(cop0, arch->pc); 2019 kvm_set_c0_guest_status(cop0, ST0_EXL); 2020 2021 if (cause & CAUSEF_BD) 2022 kvm_set_c0_guest_cause(cop0, CAUSEF_BD); 2023 else 2024 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); 2025 2026 kvm_debug("[EXL == 0] Delivering TLB MOD @ pc %#lx\n", 2027 arch->pc); 2028 2029 arch->pc = KVM_GUEST_KSEG0 + 0x180; 2030 } else { 2031 kvm_debug("[EXL == 1] Delivering TLB MOD @ pc %#lx\n", 2032 arch->pc); 2033 arch->pc = KVM_GUEST_KSEG0 + 0x180; 2034 } 2035 2036 kvm_change_c0_guest_cause(cop0, (0xff), (T_TLB_MOD << CAUSEB_EXCCODE)); 2037 2038 /* setup badvaddr, context and entryhi registers for the guest */ 2039 kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr); 2040 /* XXXKYMA: is the context register used by linux??? */ 2041 kvm_write_c0_guest_entryhi(cop0, entryhi); 2042 /* Blow away the shadow host TLBs */ 2043 kvm_mips_flush_host_tlb(1); 2044 2045 return EMULATE_DONE; 2046 } 2047 2048 enum emulation_result kvm_mips_emulate_fpu_exc(unsigned long cause, 2049 uint32_t *opc, 2050 struct kvm_run *run, 2051 struct kvm_vcpu *vcpu) 2052 { 2053 struct mips_coproc *cop0 = vcpu->arch.cop0; 2054 struct kvm_vcpu_arch *arch = &vcpu->arch; 2055 2056 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { 2057 /* save old pc */ 2058 kvm_write_c0_guest_epc(cop0, arch->pc); 2059 kvm_set_c0_guest_status(cop0, ST0_EXL); 2060 2061 if (cause & CAUSEF_BD) 2062 kvm_set_c0_guest_cause(cop0, CAUSEF_BD); 2063 else 2064 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); 2065 2066 } 2067 2068 arch->pc = KVM_GUEST_KSEG0 + 0x180; 2069 2070 kvm_change_c0_guest_cause(cop0, (0xff), 2071 (T_COP_UNUSABLE << CAUSEB_EXCCODE)); 2072 kvm_change_c0_guest_cause(cop0, (CAUSEF_CE), (0x1 << CAUSEB_CE)); 2073 2074 return EMULATE_DONE; 2075 } 2076 2077 enum emulation_result kvm_mips_emulate_ri_exc(unsigned long cause, 2078 uint32_t *opc, 2079 struct kvm_run *run, 2080 struct kvm_vcpu *vcpu) 2081 { 2082 struct mips_coproc *cop0 = vcpu->arch.cop0; 2083 struct kvm_vcpu_arch *arch = &vcpu->arch; 2084 enum emulation_result er = EMULATE_DONE; 2085 2086 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { 2087 /* save old pc */ 2088 kvm_write_c0_guest_epc(cop0, arch->pc); 2089 kvm_set_c0_guest_status(cop0, ST0_EXL); 2090 2091 if (cause & CAUSEF_BD) 2092 kvm_set_c0_guest_cause(cop0, CAUSEF_BD); 2093 else 2094 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); 2095 2096 kvm_debug("Delivering RI @ pc %#lx\n", arch->pc); 2097 2098 kvm_change_c0_guest_cause(cop0, (0xff), 2099 (T_RES_INST << CAUSEB_EXCCODE)); 2100 2101 /* Set PC to the exception entry point */ 2102 arch->pc = KVM_GUEST_KSEG0 + 0x180; 2103 2104 } else { 2105 kvm_err("Trying to deliver RI when EXL is already set\n"); 2106 er = EMULATE_FAIL; 2107 } 2108 2109 return er; 2110 } 2111 2112 enum emulation_result kvm_mips_emulate_bp_exc(unsigned long cause, 2113 uint32_t *opc, 2114 struct kvm_run *run, 2115 struct kvm_vcpu *vcpu) 2116 { 2117 struct mips_coproc *cop0 = vcpu->arch.cop0; 2118 struct kvm_vcpu_arch *arch = &vcpu->arch; 2119 enum emulation_result er = EMULATE_DONE; 2120 2121 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { 2122 /* save old pc */ 2123 kvm_write_c0_guest_epc(cop0, arch->pc); 2124 kvm_set_c0_guest_status(cop0, ST0_EXL); 2125 2126 if (cause & CAUSEF_BD) 2127 kvm_set_c0_guest_cause(cop0, CAUSEF_BD); 2128 else 2129 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); 2130 2131 kvm_debug("Delivering BP @ pc %#lx\n", arch->pc); 2132 2133 kvm_change_c0_guest_cause(cop0, (0xff), 2134 (T_BREAK << CAUSEB_EXCCODE)); 2135 2136 /* Set PC to the exception entry point */ 2137 arch->pc = KVM_GUEST_KSEG0 + 0x180; 2138 2139 } else { 2140 kvm_err("Trying to deliver BP when EXL is already set\n"); 2141 er = EMULATE_FAIL; 2142 } 2143 2144 return er; 2145 } 2146 2147 enum emulation_result kvm_mips_emulate_trap_exc(unsigned long cause, 2148 uint32_t *opc, 2149 struct kvm_run *run, 2150 struct kvm_vcpu *vcpu) 2151 { 2152 struct mips_coproc *cop0 = vcpu->arch.cop0; 2153 struct kvm_vcpu_arch *arch = &vcpu->arch; 2154 enum emulation_result er = EMULATE_DONE; 2155 2156 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { 2157 /* save old pc */ 2158 kvm_write_c0_guest_epc(cop0, arch->pc); 2159 kvm_set_c0_guest_status(cop0, ST0_EXL); 2160 2161 if (cause & CAUSEF_BD) 2162 kvm_set_c0_guest_cause(cop0, CAUSEF_BD); 2163 else 2164 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); 2165 2166 kvm_debug("Delivering TRAP @ pc %#lx\n", arch->pc); 2167 2168 kvm_change_c0_guest_cause(cop0, (0xff), 2169 (T_TRAP << CAUSEB_EXCCODE)); 2170 2171 /* Set PC to the exception entry point */ 2172 arch->pc = KVM_GUEST_KSEG0 + 0x180; 2173 2174 } else { 2175 kvm_err("Trying to deliver TRAP when EXL is already set\n"); 2176 er = EMULATE_FAIL; 2177 } 2178 2179 return er; 2180 } 2181 2182 enum emulation_result kvm_mips_emulate_msafpe_exc(unsigned long cause, 2183 uint32_t *opc, 2184 struct kvm_run *run, 2185 struct kvm_vcpu *vcpu) 2186 { 2187 struct mips_coproc *cop0 = vcpu->arch.cop0; 2188 struct kvm_vcpu_arch *arch = &vcpu->arch; 2189 enum emulation_result er = EMULATE_DONE; 2190 2191 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { 2192 /* save old pc */ 2193 kvm_write_c0_guest_epc(cop0, arch->pc); 2194 kvm_set_c0_guest_status(cop0, ST0_EXL); 2195 2196 if (cause & CAUSEF_BD) 2197 kvm_set_c0_guest_cause(cop0, CAUSEF_BD); 2198 else 2199 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); 2200 2201 kvm_debug("Delivering MSAFPE @ pc %#lx\n", arch->pc); 2202 2203 kvm_change_c0_guest_cause(cop0, (0xff), 2204 (T_MSAFPE << CAUSEB_EXCCODE)); 2205 2206 /* Set PC to the exception entry point */ 2207 arch->pc = KVM_GUEST_KSEG0 + 0x180; 2208 2209 } else { 2210 kvm_err("Trying to deliver MSAFPE when EXL is already set\n"); 2211 er = EMULATE_FAIL; 2212 } 2213 2214 return er; 2215 } 2216 2217 enum emulation_result kvm_mips_emulate_fpe_exc(unsigned long cause, 2218 uint32_t *opc, 2219 struct kvm_run *run, 2220 struct kvm_vcpu *vcpu) 2221 { 2222 struct mips_coproc *cop0 = vcpu->arch.cop0; 2223 struct kvm_vcpu_arch *arch = &vcpu->arch; 2224 enum emulation_result er = EMULATE_DONE; 2225 2226 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { 2227 /* save old pc */ 2228 kvm_write_c0_guest_epc(cop0, arch->pc); 2229 kvm_set_c0_guest_status(cop0, ST0_EXL); 2230 2231 if (cause & CAUSEF_BD) 2232 kvm_set_c0_guest_cause(cop0, CAUSEF_BD); 2233 else 2234 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); 2235 2236 kvm_debug("Delivering FPE @ pc %#lx\n", arch->pc); 2237 2238 kvm_change_c0_guest_cause(cop0, (0xff), 2239 (T_FPE << CAUSEB_EXCCODE)); 2240 2241 /* Set PC to the exception entry point */ 2242 arch->pc = KVM_GUEST_KSEG0 + 0x180; 2243 2244 } else { 2245 kvm_err("Trying to deliver FPE when EXL is already set\n"); 2246 er = EMULATE_FAIL; 2247 } 2248 2249 return er; 2250 } 2251 2252 enum emulation_result kvm_mips_emulate_msadis_exc(unsigned long cause, 2253 uint32_t *opc, 2254 struct kvm_run *run, 2255 struct kvm_vcpu *vcpu) 2256 { 2257 struct mips_coproc *cop0 = vcpu->arch.cop0; 2258 struct kvm_vcpu_arch *arch = &vcpu->arch; 2259 enum emulation_result er = EMULATE_DONE; 2260 2261 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { 2262 /* save old pc */ 2263 kvm_write_c0_guest_epc(cop0, arch->pc); 2264 kvm_set_c0_guest_status(cop0, ST0_EXL); 2265 2266 if (cause & CAUSEF_BD) 2267 kvm_set_c0_guest_cause(cop0, CAUSEF_BD); 2268 else 2269 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); 2270 2271 kvm_debug("Delivering MSADIS @ pc %#lx\n", arch->pc); 2272 2273 kvm_change_c0_guest_cause(cop0, (0xff), 2274 (T_MSADIS << CAUSEB_EXCCODE)); 2275 2276 /* Set PC to the exception entry point */ 2277 arch->pc = KVM_GUEST_KSEG0 + 0x180; 2278 2279 } else { 2280 kvm_err("Trying to deliver MSADIS when EXL is already set\n"); 2281 er = EMULATE_FAIL; 2282 } 2283 2284 return er; 2285 } 2286 2287 /* ll/sc, rdhwr, sync emulation */ 2288 2289 #define OPCODE 0xfc000000 2290 #define BASE 0x03e00000 2291 #define RT 0x001f0000 2292 #define OFFSET 0x0000ffff 2293 #define LL 0xc0000000 2294 #define SC 0xe0000000 2295 #define SPEC0 0x00000000 2296 #define SPEC3 0x7c000000 2297 #define RD 0x0000f800 2298 #define FUNC 0x0000003f 2299 #define SYNC 0x0000000f 2300 #define RDHWR 0x0000003b 2301 2302 enum emulation_result kvm_mips_handle_ri(unsigned long cause, uint32_t *opc, 2303 struct kvm_run *run, 2304 struct kvm_vcpu *vcpu) 2305 { 2306 struct mips_coproc *cop0 = vcpu->arch.cop0; 2307 struct kvm_vcpu_arch *arch = &vcpu->arch; 2308 enum emulation_result er = EMULATE_DONE; 2309 unsigned long curr_pc; 2310 uint32_t inst; 2311 2312 /* 2313 * Update PC and hold onto current PC in case there is 2314 * an error and we want to rollback the PC 2315 */ 2316 curr_pc = vcpu->arch.pc; 2317 er = update_pc(vcpu, cause); 2318 if (er == EMULATE_FAIL) 2319 return er; 2320 2321 /* Fetch the instruction. */ 2322 if (cause & CAUSEF_BD) 2323 opc += 1; 2324 2325 inst = kvm_get_inst(opc, vcpu); 2326 2327 if (inst == KVM_INVALID_INST) { 2328 kvm_err("%s: Cannot get inst @ %p\n", __func__, opc); 2329 return EMULATE_FAIL; 2330 } 2331 2332 if ((inst & OPCODE) == SPEC3 && (inst & FUNC) == RDHWR) { 2333 int usermode = !KVM_GUEST_KERNEL_MODE(vcpu); 2334 int rd = (inst & RD) >> 11; 2335 int rt = (inst & RT) >> 16; 2336 /* If usermode, check RDHWR rd is allowed by guest HWREna */ 2337 if (usermode && !(kvm_read_c0_guest_hwrena(cop0) & BIT(rd))) { 2338 kvm_debug("RDHWR %#x disallowed by HWREna @ %p\n", 2339 rd, opc); 2340 goto emulate_ri; 2341 } 2342 switch (rd) { 2343 case 0: /* CPU number */ 2344 arch->gprs[rt] = 0; 2345 break; 2346 case 1: /* SYNCI length */ 2347 arch->gprs[rt] = min(current_cpu_data.dcache.linesz, 2348 current_cpu_data.icache.linesz); 2349 break; 2350 case 2: /* Read count register */ 2351 arch->gprs[rt] = kvm_mips_read_count(vcpu); 2352 break; 2353 case 3: /* Count register resolution */ 2354 switch (current_cpu_data.cputype) { 2355 case CPU_20KC: 2356 case CPU_25KF: 2357 arch->gprs[rt] = 1; 2358 break; 2359 default: 2360 arch->gprs[rt] = 2; 2361 } 2362 break; 2363 case 29: 2364 arch->gprs[rt] = kvm_read_c0_guest_userlocal(cop0); 2365 break; 2366 2367 default: 2368 kvm_debug("RDHWR %#x not supported @ %p\n", rd, opc); 2369 goto emulate_ri; 2370 } 2371 } else { 2372 kvm_debug("Emulate RI not supported @ %p: %#x\n", opc, inst); 2373 goto emulate_ri; 2374 } 2375 2376 return EMULATE_DONE; 2377 2378 emulate_ri: 2379 /* 2380 * Rollback PC (if in branch delay slot then the PC already points to 2381 * branch target), and pass the RI exception to the guest OS. 2382 */ 2383 vcpu->arch.pc = curr_pc; 2384 return kvm_mips_emulate_ri_exc(cause, opc, run, vcpu); 2385 } 2386 2387 enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu, 2388 struct kvm_run *run) 2389 { 2390 unsigned long *gpr = &vcpu->arch.gprs[vcpu->arch.io_gpr]; 2391 enum emulation_result er = EMULATE_DONE; 2392 unsigned long curr_pc; 2393 2394 if (run->mmio.len > sizeof(*gpr)) { 2395 kvm_err("Bad MMIO length: %d", run->mmio.len); 2396 er = EMULATE_FAIL; 2397 goto done; 2398 } 2399 2400 /* 2401 * Update PC and hold onto current PC in case there is 2402 * an error and we want to rollback the PC 2403 */ 2404 curr_pc = vcpu->arch.pc; 2405 er = update_pc(vcpu, vcpu->arch.pending_load_cause); 2406 if (er == EMULATE_FAIL) 2407 return er; 2408 2409 switch (run->mmio.len) { 2410 case 4: 2411 *gpr = *(int32_t *) run->mmio.data; 2412 break; 2413 2414 case 2: 2415 if (vcpu->mmio_needed == 2) 2416 *gpr = *(int16_t *) run->mmio.data; 2417 else 2418 *gpr = *(int16_t *) run->mmio.data; 2419 2420 break; 2421 case 1: 2422 if (vcpu->mmio_needed == 2) 2423 *gpr = *(int8_t *) run->mmio.data; 2424 else 2425 *gpr = *(u8 *) run->mmio.data; 2426 break; 2427 } 2428 2429 if (vcpu->arch.pending_load_cause & CAUSEF_BD) 2430 kvm_debug("[%#lx] Completing %d byte BD Load to gpr %d (0x%08lx) type %d\n", 2431 vcpu->arch.pc, run->mmio.len, vcpu->arch.io_gpr, *gpr, 2432 vcpu->mmio_needed); 2433 2434 done: 2435 return er; 2436 } 2437 2438 static enum emulation_result kvm_mips_emulate_exc(unsigned long cause, 2439 uint32_t *opc, 2440 struct kvm_run *run, 2441 struct kvm_vcpu *vcpu) 2442 { 2443 uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f; 2444 struct mips_coproc *cop0 = vcpu->arch.cop0; 2445 struct kvm_vcpu_arch *arch = &vcpu->arch; 2446 enum emulation_result er = EMULATE_DONE; 2447 2448 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { 2449 /* save old pc */ 2450 kvm_write_c0_guest_epc(cop0, arch->pc); 2451 kvm_set_c0_guest_status(cop0, ST0_EXL); 2452 2453 if (cause & CAUSEF_BD) 2454 kvm_set_c0_guest_cause(cop0, CAUSEF_BD); 2455 else 2456 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); 2457 2458 kvm_change_c0_guest_cause(cop0, (0xff), 2459 (exccode << CAUSEB_EXCCODE)); 2460 2461 /* Set PC to the exception entry point */ 2462 arch->pc = KVM_GUEST_KSEG0 + 0x180; 2463 kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr); 2464 2465 kvm_debug("Delivering EXC %d @ pc %#lx, badVaddr: %#lx\n", 2466 exccode, kvm_read_c0_guest_epc(cop0), 2467 kvm_read_c0_guest_badvaddr(cop0)); 2468 } else { 2469 kvm_err("Trying to deliver EXC when EXL is already set\n"); 2470 er = EMULATE_FAIL; 2471 } 2472 2473 return er; 2474 } 2475 2476 enum emulation_result kvm_mips_check_privilege(unsigned long cause, 2477 uint32_t *opc, 2478 struct kvm_run *run, 2479 struct kvm_vcpu *vcpu) 2480 { 2481 enum emulation_result er = EMULATE_DONE; 2482 uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f; 2483 unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr; 2484 2485 int usermode = !KVM_GUEST_KERNEL_MODE(vcpu); 2486 2487 if (usermode) { 2488 switch (exccode) { 2489 case T_INT: 2490 case T_SYSCALL: 2491 case T_BREAK: 2492 case T_RES_INST: 2493 case T_TRAP: 2494 case T_MSAFPE: 2495 case T_FPE: 2496 case T_MSADIS: 2497 break; 2498 2499 case T_COP_UNUSABLE: 2500 if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 0) 2501 er = EMULATE_PRIV_FAIL; 2502 break; 2503 2504 case T_TLB_MOD: 2505 break; 2506 2507 case T_TLB_LD_MISS: 2508 /* 2509 * We we are accessing Guest kernel space, then send an 2510 * address error exception to the guest 2511 */ 2512 if (badvaddr >= (unsigned long) KVM_GUEST_KSEG0) { 2513 kvm_debug("%s: LD MISS @ %#lx\n", __func__, 2514 badvaddr); 2515 cause &= ~0xff; 2516 cause |= (T_ADDR_ERR_LD << CAUSEB_EXCCODE); 2517 er = EMULATE_PRIV_FAIL; 2518 } 2519 break; 2520 2521 case T_TLB_ST_MISS: 2522 /* 2523 * We we are accessing Guest kernel space, then send an 2524 * address error exception to the guest 2525 */ 2526 if (badvaddr >= (unsigned long) KVM_GUEST_KSEG0) { 2527 kvm_debug("%s: ST MISS @ %#lx\n", __func__, 2528 badvaddr); 2529 cause &= ~0xff; 2530 cause |= (T_ADDR_ERR_ST << CAUSEB_EXCCODE); 2531 er = EMULATE_PRIV_FAIL; 2532 } 2533 break; 2534 2535 case T_ADDR_ERR_ST: 2536 kvm_debug("%s: address error ST @ %#lx\n", __func__, 2537 badvaddr); 2538 if ((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) { 2539 cause &= ~0xff; 2540 cause |= (T_TLB_ST_MISS << CAUSEB_EXCCODE); 2541 } 2542 er = EMULATE_PRIV_FAIL; 2543 break; 2544 case T_ADDR_ERR_LD: 2545 kvm_debug("%s: address error LD @ %#lx\n", __func__, 2546 badvaddr); 2547 if ((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) { 2548 cause &= ~0xff; 2549 cause |= (T_TLB_LD_MISS << CAUSEB_EXCCODE); 2550 } 2551 er = EMULATE_PRIV_FAIL; 2552 break; 2553 default: 2554 er = EMULATE_PRIV_FAIL; 2555 break; 2556 } 2557 } 2558 2559 if (er == EMULATE_PRIV_FAIL) 2560 kvm_mips_emulate_exc(cause, opc, run, vcpu); 2561 2562 return er; 2563 } 2564 2565 /* 2566 * User Address (UA) fault, this could happen if 2567 * (1) TLB entry not present/valid in both Guest and shadow host TLBs, in this 2568 * case we pass on the fault to the guest kernel and let it handle it. 2569 * (2) TLB entry is present in the Guest TLB but not in the shadow, in this 2570 * case we inject the TLB from the Guest TLB into the shadow host TLB 2571 */ 2572 enum emulation_result kvm_mips_handle_tlbmiss(unsigned long cause, 2573 uint32_t *opc, 2574 struct kvm_run *run, 2575 struct kvm_vcpu *vcpu) 2576 { 2577 enum emulation_result er = EMULATE_DONE; 2578 uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f; 2579 unsigned long va = vcpu->arch.host_cp0_badvaddr; 2580 int index; 2581 2582 kvm_debug("kvm_mips_handle_tlbmiss: badvaddr: %#lx, entryhi: %#lx\n", 2583 vcpu->arch.host_cp0_badvaddr, vcpu->arch.host_cp0_entryhi); 2584 2585 /* 2586 * KVM would not have got the exception if this entry was valid in the 2587 * shadow host TLB. Check the Guest TLB, if the entry is not there then 2588 * send the guest an exception. The guest exc handler should then inject 2589 * an entry into the guest TLB. 2590 */ 2591 index = kvm_mips_guest_tlb_lookup(vcpu, 2592 (va & VPN2_MASK) | 2593 (kvm_read_c0_guest_entryhi 2594 (vcpu->arch.cop0) & ASID_MASK)); 2595 if (index < 0) { 2596 if (exccode == T_TLB_LD_MISS) { 2597 er = kvm_mips_emulate_tlbmiss_ld(cause, opc, run, vcpu); 2598 } else if (exccode == T_TLB_ST_MISS) { 2599 er = kvm_mips_emulate_tlbmiss_st(cause, opc, run, vcpu); 2600 } else { 2601 kvm_err("%s: invalid exc code: %d\n", __func__, 2602 exccode); 2603 er = EMULATE_FAIL; 2604 } 2605 } else { 2606 struct kvm_mips_tlb *tlb = &vcpu->arch.guest_tlb[index]; 2607 2608 /* 2609 * Check if the entry is valid, if not then setup a TLB invalid 2610 * exception to the guest 2611 */ 2612 if (!TLB_IS_VALID(*tlb, va)) { 2613 if (exccode == T_TLB_LD_MISS) { 2614 er = kvm_mips_emulate_tlbinv_ld(cause, opc, run, 2615 vcpu); 2616 } else if (exccode == T_TLB_ST_MISS) { 2617 er = kvm_mips_emulate_tlbinv_st(cause, opc, run, 2618 vcpu); 2619 } else { 2620 kvm_err("%s: invalid exc code: %d\n", __func__, 2621 exccode); 2622 er = EMULATE_FAIL; 2623 } 2624 } else { 2625 kvm_debug("Injecting hi: %#lx, lo0: %#lx, lo1: %#lx into shadow host TLB\n", 2626 tlb->tlb_hi, tlb->tlb_lo0, tlb->tlb_lo1); 2627 /* 2628 * OK we have a Guest TLB entry, now inject it into the 2629 * shadow host TLB 2630 */ 2631 kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, NULL, 2632 NULL); 2633 } 2634 } 2635 2636 return er; 2637 } 2638