/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS: Support for hardware virtualization extensions
 *
 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
 * Authors: Yann Le Du <ledu@kymasys.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/preempt.h>
#include <linux/vmalloc.h>
#include <asm/cacheflush.h>
#include <asm/cacheops.h>
#include <asm/cmpxchg.h>
#include <asm/fpu.h>
#include <asm/hazards.h>
#include <asm/inst.h>
#include <asm/mmu_context.h>
#include <asm/r4kcache.h>
#include <asm/time.h>
#include <asm/tlb.h>
#include <asm/tlbex.h>

#include <linux/kvm_host.h>

#include "interrupt.h"

#include "trace.h"

/* Pointers to last VCPU loaded on each physical CPU */
static struct kvm_vcpu *last_vcpu[NR_CPUS];
/* Pointers to last VCPU executed on each physical CPU */
static struct kvm_vcpu *last_exec_vcpu[NR_CPUS];

/*
 * Number of guest VTLB entries to use, so we can catch inconsistency between
 * CPUs.
 */
static unsigned int kvm_vz_guest_vtlb_size;

static inline long kvm_vz_read_gc0_ebase(void)
{
	if (sizeof(long) == 8 && cpu_has_ebase_wg)
		return read_gc0_ebase_64();
	else
		return read_gc0_ebase();
}

static inline void kvm_vz_write_gc0_ebase(long v)
{
	/*
	 * First write with WG=1 to write upper bits, then write again in case
	 * WG should be left at 0.
	 * write_gc0_ebase_64() is no longer UNDEFINED since R6.
	 */
	if (sizeof(long) == 8 &&
	    (cpu_has_mips64r6 || cpu_has_ebase_wg)) {
		write_gc0_ebase_64(v | MIPS_EBASE_WG);
		write_gc0_ebase_64(v);
	} else {
		write_gc0_ebase(v | MIPS_EBASE_WG);
		write_gc0_ebase(v);
	}
}
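
/*
 * Illustrative example (comment only, not part of the build): relocating
 * the guest exception base to a hypothetical 0xffffffff80100000 would be
 * done as:
 *
 *	kvm_vz_write_gc0_ebase(0xffffffff80100000);
 *
 * The first internal write has WG set so that the bits above EBase[29:12]
 * actually latch; the second write restores WG=0 in case the guest expects
 * it to read back as clear.
 */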

/*
 * These Config bits may be writable by the guest:
 * Config:	[K23, KU] (!TLB), K0
 * Config1:	(none)
 * Config2:	[TU, SU] (impl)
 * Config3:	ISAOnExc
 * Config4:	FTLBPageSize
 * Config5:	K, CV, MSAEn, UFE, FRE, SBRI, UFR
 */

static inline unsigned int kvm_vz_config_guest_wrmask(struct kvm_vcpu *vcpu)
{
	return CONF_CM_CMASK;
}

static inline unsigned int kvm_vz_config1_guest_wrmask(struct kvm_vcpu *vcpu)
{
	return 0;
}

static inline unsigned int kvm_vz_config2_guest_wrmask(struct kvm_vcpu *vcpu)
{
	return 0;
}

static inline unsigned int kvm_vz_config3_guest_wrmask(struct kvm_vcpu *vcpu)
{
	return MIPS_CONF3_ISA_OE;
}

static inline unsigned int kvm_vz_config4_guest_wrmask(struct kvm_vcpu *vcpu)
{
	/* no need to be exact */
	return MIPS_CONF4_VFTLBPAGESIZE;
}

static inline unsigned int kvm_vz_config5_guest_wrmask(struct kvm_vcpu *vcpu)
{
	unsigned int mask = MIPS_CONF5_K | MIPS_CONF5_CV | MIPS_CONF5_SBRI;

	/* Permit MSAEn changes if MSA supported and enabled */
	if (kvm_mips_guest_has_msa(&vcpu->arch))
		mask |= MIPS_CONF5_MSAEN;

	/*
	 * Permit guest FPU mode changes if FPU is enabled and the relevant
	 * feature exists according to FIR register.
	 */
	if (kvm_mips_guest_has_fpu(&vcpu->arch)) {
		if (cpu_has_ufr)
			mask |= MIPS_CONF5_UFR;
		if (cpu_has_fre)
			mask |= MIPS_CONF5_FRE | MIPS_CONF5_UFE;
	}

	return mask;
}

/*
 * VZ optionally allows these additional Config bits to be written by root:
 * Config:	M, [MT]
 * Config1:	M, [MMUSize-1, C2, MD, PC, WR, CA], FP
 * Config2:	M
 * Config3:	M, MSAP, [BPG], ULRI, [DSP2P, DSPP, CTXTC, ITL, LPA, VEIC,
 *		VInt, SP, CDMM, MT, SM, TL]
 * Config4:	M, [VTLBSizeExt, MMUSizeExt]
 * Config5:	[MRP]
 */

static inline unsigned int kvm_vz_config_user_wrmask(struct kvm_vcpu *vcpu)
{
	return kvm_vz_config_guest_wrmask(vcpu) | MIPS_CONF_M;
}

static inline unsigned int kvm_vz_config1_user_wrmask(struct kvm_vcpu *vcpu)
{
	unsigned int mask = kvm_vz_config1_guest_wrmask(vcpu) | MIPS_CONF_M;

	/* Permit FPU to be present if FPU is supported */
	if (kvm_mips_guest_can_have_fpu(&vcpu->arch))
		mask |= MIPS_CONF1_FP;

	return mask;
}

static inline unsigned int kvm_vz_config2_user_wrmask(struct kvm_vcpu *vcpu)
{
	return kvm_vz_config2_guest_wrmask(vcpu) | MIPS_CONF_M;
}

static inline unsigned int kvm_vz_config3_user_wrmask(struct kvm_vcpu *vcpu)
{
	unsigned int mask = kvm_vz_config3_guest_wrmask(vcpu) | MIPS_CONF_M |
		MIPS_CONF3_ULRI;

	/* Permit MSA to be present if MSA is supported */
	if (kvm_mips_guest_can_have_msa(&vcpu->arch))
		mask |= MIPS_CONF3_MSA;

	return mask;
}

static inline unsigned int kvm_vz_config4_user_wrmask(struct kvm_vcpu *vcpu)
{
	return kvm_vz_config4_guest_wrmask(vcpu) | MIPS_CONF_M;
}

static inline unsigned int kvm_vz_config5_user_wrmask(struct kvm_vcpu *vcpu)
{
	return kvm_vz_config5_guest_wrmask(vcpu);
}

static gpa_t kvm_vz_gva_to_gpa_cb(gva_t gva)
{
	/* VZ guest has already converted gva to gpa */
	return gva;
}

static void kvm_vz_queue_irq(struct kvm_vcpu *vcpu, unsigned int priority)
{
	set_bit(priority, &vcpu->arch.pending_exceptions);
	clear_bit(priority, &vcpu->arch.pending_exceptions_clr);
}

static void kvm_vz_dequeue_irq(struct kvm_vcpu *vcpu, unsigned int priority)
{
	clear_bit(priority, &vcpu->arch.pending_exceptions);
	set_bit(priority, &vcpu->arch.pending_exceptions_clr);
}

static void kvm_vz_queue_timer_int_cb(struct kvm_vcpu *vcpu)
{
	/*
	 * timer expiry is asynchronous to vcpu execution therefore defer guest
	 * cp0 accesses
	 */
	kvm_vz_queue_irq(vcpu, MIPS_EXC_INT_TIMER);
}

static void kvm_vz_dequeue_timer_int_cb(struct kvm_vcpu *vcpu)
{
	/*
	 * timer expiry is asynchronous to vcpu execution therefore defer guest
	 * cp0 accesses
	 */
	kvm_vz_dequeue_irq(vcpu, MIPS_EXC_INT_TIMER);
}

static void kvm_vz_queue_io_int_cb(struct kvm_vcpu *vcpu,
				   struct kvm_mips_interrupt *irq)
{
	int intr = (int)irq->irq;

	/*
	 * interrupts are asynchronous to vcpu execution therefore defer guest
	 * cp0 accesses
	 */
	switch (intr) {
	case 2:
		kvm_vz_queue_irq(vcpu, MIPS_EXC_INT_IO);
		break;

	case 3:
		kvm_vz_queue_irq(vcpu, MIPS_EXC_INT_IPI_1);
		break;

	case 4:
		kvm_vz_queue_irq(vcpu, MIPS_EXC_INT_IPI_2);
		break;

	default:
		break;
	}
}
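
/*
 * Illustrative usage (comment only): userspace raising guest interrupt
 * line 2 (I/O) via the KVM_INTERRUPT ioctl ends up here roughly as:
 *
 *	struct kvm_mips_interrupt irq = { .irq = 2 };
 *	kvm_vz_queue_io_int_cb(vcpu, &irq);
 *
 * which only latches MIPS_EXC_INT_IO in pending_exceptions; the actual
 * Cause.IP/GuestCtl2 bit is set later by kvm_vz_irq_deliver_cb() below.
 */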

static void kvm_vz_dequeue_io_int_cb(struct kvm_vcpu *vcpu,
				     struct kvm_mips_interrupt *irq)
{
	int intr = (int)irq->irq;

	/*
	 * interrupts are asynchronous to vcpu execution therefore defer guest
	 * cp0 accesses
	 */
	switch (intr) {
	case -2:
		kvm_vz_dequeue_irq(vcpu, MIPS_EXC_INT_IO);
		break;

	case -3:
		kvm_vz_dequeue_irq(vcpu, MIPS_EXC_INT_IPI_1);
		break;

	case -4:
		kvm_vz_dequeue_irq(vcpu, MIPS_EXC_INT_IPI_2);
		break;

	default:
		break;
	}
}

static u32 kvm_vz_priority_to_irq[MIPS_EXC_MAX] = {
	[MIPS_EXC_INT_TIMER] = C_IRQ5,
	[MIPS_EXC_INT_IO]    = C_IRQ0,
	[MIPS_EXC_INT_IPI_1] = C_IRQ1,
	[MIPS_EXC_INT_IPI_2] = C_IRQ2,
};

static int kvm_vz_irq_deliver_cb(struct kvm_vcpu *vcpu, unsigned int priority,
				 u32 cause)
{
	u32 irq = (priority < MIPS_EXC_MAX) ?
		kvm_vz_priority_to_irq[priority] : 0;

	switch (priority) {
	case MIPS_EXC_INT_TIMER:
		set_gc0_cause(C_TI);
		break;

	case MIPS_EXC_INT_IO:
	case MIPS_EXC_INT_IPI_1:
	case MIPS_EXC_INT_IPI_2:
		if (cpu_has_guestctl2)
			set_c0_guestctl2(irq);
		else
			set_gc0_cause(irq);
		break;

	default:
		break;
	}

	clear_bit(priority, &vcpu->arch.pending_exceptions);
	return 1;
}

static int kvm_vz_irq_clear_cb(struct kvm_vcpu *vcpu, unsigned int priority,
			       u32 cause)
{
	u32 irq = (priority < MIPS_EXC_MAX) ?
		kvm_vz_priority_to_irq[priority] : 0;

	switch (priority) {
	case MIPS_EXC_INT_TIMER:
		/*
		 * Call to kvm_write_c0_guest_compare() clears Cause.TI in
		 * kvm_mips_emulate_CP0(). Explicitly clear irq associated with
		 * Cause.IP[IPTI] if GuestCtl2 virtual interrupt register not
		 * supported or if not using GuestCtl2 Hardware Clear.
		 */
		if (cpu_has_guestctl2) {
			if (!(read_c0_guestctl2() & (irq << 14)))
				clear_c0_guestctl2(irq);
		} else {
			clear_gc0_cause(irq);
		}
		break;

	case MIPS_EXC_INT_IO:
	case MIPS_EXC_INT_IPI_1:
	case MIPS_EXC_INT_IPI_2:
		/* Clear GuestCtl2.VIP irq if not using Hardware Clear */
		if (cpu_has_guestctl2) {
			if (!(read_c0_guestctl2() & (irq << 14)))
				clear_c0_guestctl2(irq);
		} else {
			clear_gc0_cause(irq);
		}
		break;

	default:
		break;
	}

	clear_bit(priority, &vcpu->arch.pending_exceptions_clr);
	return 1;
}

/*
 * VZ guest timer handling.
 */

/**
 * _kvm_vz_restore_stimer() - Restore soft timer state.
 * @vcpu:	Virtual CPU.
 * @compare:	CP0_Compare register value, restored by caller.
 * @cause:	CP0_Cause register to restore.
 *
 * Restore VZ state relating to the soft timer.
 */
static void _kvm_vz_restore_stimer(struct kvm_vcpu *vcpu, u32 compare,
				   u32 cause)
{
	/*
	 * Avoid spurious counter interrupts by setting Guest CP0_Count to just
	 * after Guest CP0_Compare.
	 */
	write_c0_gtoffset(compare - read_c0_count());

	back_to_back_c0_hazard();
	write_gc0_cause(cause);
}
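
/*
 * Worked example (illustrative values): the guest reads CP0_Count as
 * root CP0_Count + GTOffset.  If root Count is 0x1000 and the saved guest
 * Compare is 0x2000, then GTOffset = 0x2000 - 0x1000 = 0x1000, so the
 * guest immediately sees Count == Compare == 0x2000, i.e. at/just past
 * the firing point.  Any pending timer state thus comes from the restored
 * Cause.TI rather than a fresh, spurious compare match.
 */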

/**
 * kvm_vz_restore_timer() - Restore guest timer state.
 * @vcpu:	Virtual CPU.
 *
 * Restore soft timer state from saved context.
 */
static void kvm_vz_restore_timer(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	u32 cause, compare;

	compare = kvm_read_sw_gc0_compare(cop0);
	cause = kvm_read_sw_gc0_cause(cop0);

	write_gc0_compare(compare);
	_kvm_vz_restore_stimer(vcpu, compare, cause);
}

/**
 * kvm_vz_save_timer() - Save guest timer state.
 * @vcpu:	Virtual CPU.
 *
 * Save VZ guest timer state.
 */
static void kvm_vz_save_timer(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	u32 compare, cause;

	compare = read_gc0_compare();
	cause = read_gc0_cause();

	/* save timer-related state to VCPU context */
	kvm_write_sw_gc0_cause(cop0, cause);
	kvm_write_sw_gc0_compare(cop0, compare);
}

/**
 * kvm_vz_gva_to_gpa() - Convert valid GVA to GPA.
 * @vcpu:	KVM VCPU state.
 * @gva:	Guest virtual address to convert.
 * @gpa:	Output guest physical address.
 *
 * Convert a guest virtual address (GVA) which is valid according to the guest
 * context, to a guest physical address (GPA).
 *
 * Returns:	0 on success.
 *		-errno on failure.
 */
static int kvm_vz_gva_to_gpa(struct kvm_vcpu *vcpu, unsigned long gva,
			     unsigned long *gpa)
{
	u32 gva32 = gva;

	if ((long)gva == (s32)gva32) {
		/* Handle canonical 32-bit virtual address */
		if ((s32)gva32 < (s32)0xc0000000) {
			/* legacy unmapped KSeg0 or KSeg1 */
			*gpa = gva32 & 0x1fffffff;
			return 0;
		}
#ifdef CONFIG_64BIT
	} else if ((gva & 0xc000000000000000) == 0x8000000000000000) {
		/* XKPHYS */
		/*
		 * Traditionally fully unmapped.
		 * Bits 61:59 specify the CCA, which we can just mask off here.
		 * Bits 58:PABITS should be zero, but we shouldn't have got here
		 * if it wasn't.
		 */
		*gpa = gva & 0x07ffffffffffffff;
		return 0;
#endif
	}

	return kvm_vz_guest_tlb_lookup(vcpu, gva, gpa);
}

/**
 * kvm_vz_badvaddr_to_gpa() - Convert GVA BadVAddr from root exception to GPA.
 * @vcpu:	KVM VCPU state.
 * @badvaddr:	Root BadVAddr.
 * @gpa:	Output guest physical address.
 *
 * VZ implementations are permitted to report guest virtual addresses (GVA) in
 * BadVAddr on a root exception during guest execution, instead of the more
 * convenient guest physical addresses (GPA). When we get a GVA, this function
 * converts it to a GPA, taking into account guest segmentation and guest TLB
 * state.
 *
 * Returns:	0 on success.
 *		-errno on failure.
 */
static int kvm_vz_badvaddr_to_gpa(struct kvm_vcpu *vcpu, unsigned long badvaddr,
				  unsigned long *gpa)
{
	unsigned int gexccode = (vcpu->arch.host_cp0_guestctl0 &
				 MIPS_GCTL0_GEXC) >> MIPS_GCTL0_GEXC_SHIFT;

	/* If BadVAddr is GPA, then all is well in the world */
	if (likely(gexccode == MIPS_GCTL0_GEXC_GPA)) {
		*gpa = badvaddr;
		return 0;
	}

	/* Otherwise we'd expect it to be GVA ... */
	if (WARN(gexccode != MIPS_GCTL0_GEXC_GVA,
		 "Unexpected gexccode %#x\n", gexccode))
		return -EINVAL;

	/* ... and we need to perform the GVA->GPA translation in software */
	return kvm_vz_gva_to_gpa(vcpu, badvaddr, gpa);
}
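
/*
 * Worked examples (illustrative addresses) for kvm_vz_gva_to_gpa() above:
 *
 *   KSeg0:  GVA 0xffffffff80001000 -> gva32 0x80001000, unmapped segment,
 *           so GPA = 0x80001000 & 0x1fffffff = 0x00001000.
 *   XKPHYS: GVA 0x9000000080001000 (CCA=2, uncached) -> GPA =
 *           0x0000000080001000 after masking off bits 63:59.
 *
 * Mapped addresses (e.g. KSeg2/useg) fall through to the guest TLB lookup.
 */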

static int kvm_trap_vz_no_handler(struct kvm_vcpu *vcpu)
{
	u32 *opc = (u32 *) vcpu->arch.pc;
	u32 cause = vcpu->arch.host_cp0_cause;
	u32 exccode = (cause & CAUSEF_EXCCODE) >> CAUSEB_EXCCODE;
	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
	u32 inst = 0;

	/*
	 * Fetch the instruction.
	 */
	if (cause & CAUSEF_BD)
		opc += 1;
	kvm_get_badinstr(opc, vcpu, &inst);

	kvm_err("Exception Code: %d not handled @ PC: %p, inst: 0x%08x BadVaddr: %#lx Status: %#x\n",
		exccode, opc, inst, badvaddr,
		read_gc0_status());
	kvm_arch_vcpu_dump_regs(vcpu);
	vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
	return RESUME_HOST;
}

static enum emulation_result kvm_vz_gpsi_cop0(union mips_instruction inst,
					      u32 *opc, u32 cause,
					      struct kvm_run *run,
					      struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	enum emulation_result er = EMULATE_DONE;
	u32 rt, rd, sel;
	unsigned long curr_pc;
	unsigned long val;

	/*
	 * Update PC and hold onto current PC in case there is
	 * an error and we want to rollback the PC
	 */
	curr_pc = vcpu->arch.pc;
	er = update_pc(vcpu, cause);
	if (er == EMULATE_FAIL)
		return er;

	if (inst.co_format.co) {
		switch (inst.co_format.func) {
		case wait_op:
			er = kvm_mips_emul_wait(vcpu);
			break;
		default:
			er = EMULATE_FAIL;
		}
	} else {
		rt = inst.c0r_format.rt;
		rd = inst.c0r_format.rd;
		sel = inst.c0r_format.sel;

		switch (inst.c0r_format.rs) {
		case dmfc_op:
		case mfc_op:
#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
			cop0->stat[rd][sel]++;
#endif
			if (rd == MIPS_CP0_COUNT &&
			    sel == 0) {			/* Count */
				val = kvm_mips_read_count(vcpu);
			} else if (rd == MIPS_CP0_COMPARE &&
				   sel == 0) {		/* Compare */
				val = read_gc0_compare();
			} else if ((rd == MIPS_CP0_PRID &&
				    (sel == 0 ||	/* PRid */
				     sel == 2 ||	/* CDMMBase */
				     sel == 3)) ||	/* CMGCRBase */
				   (rd == MIPS_CP0_STATUS &&
				    (sel == 2 ||	/* SRSCtl */
				     sel == 3)) ||	/* SRSMap */
				   (rd == MIPS_CP0_CONFIG &&
				    (sel == 7)) ||	/* Config7 */
				   (rd == MIPS_CP0_ERRCTL &&
				    (sel == 0))) {	/* ErrCtl */
				val = cop0->reg[rd][sel];
			} else {
				val = 0;
				er = EMULATE_FAIL;
			}

			if (er != EMULATE_FAIL) {
				/* Sign extend */
				if (inst.c0r_format.rs == mfc_op)
					val = (int)val;
				vcpu->arch.gprs[rt] = val;
			}

			trace_kvm_hwr(vcpu, (inst.c0r_format.rs == mfc_op) ?
					KVM_TRACE_MFC0 : KVM_TRACE_DMFC0,
				      KVM_TRACE_COP0(rd, sel), val);
			break;

		case dmtc_op:
		case mtc_op:
#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
			cop0->stat[rd][sel]++;
#endif
			val = vcpu->arch.gprs[rt];
			trace_kvm_hwr(vcpu, (inst.c0r_format.rs == mtc_op) ?
					KVM_TRACE_MTC0 : KVM_TRACE_DMTC0,
				      KVM_TRACE_COP0(rd, sel), val);

			if (rd == MIPS_CP0_COUNT &&
			    sel == 0) {			/* Count */
				kvm_mips_write_count(vcpu, vcpu->arch.gprs[rt]);
			} else if (rd == MIPS_CP0_COMPARE &&
				   sel == 0) {		/* Compare */
				kvm_mips_write_compare(vcpu,
						       vcpu->arch.gprs[rt],
						       true);
			} else if (rd == MIPS_CP0_ERRCTL &&
				   (sel == 0)) {	/* ErrCtl */
				/* ignore the written value */
			} else {
				er = EMULATE_FAIL;
			}
			break;

		default:
			er = EMULATE_FAIL;
			break;
		}
	}

	/* Rollback PC only if emulation was unsuccessful */
	if (er == EMULATE_FAIL) {
		kvm_err("[%#lx]%s: unsupported cop0 instruction 0x%08x\n",
			curr_pc, __func__, inst.word);

		vcpu->arch.pc = curr_pc;
	}

	return er;
}

static enum emulation_result kvm_vz_gpsi_cache(union mips_instruction inst,
					       u32 *opc, u32 cause,
					       struct kvm_run *run,
					       struct kvm_vcpu *vcpu)
{
	enum emulation_result er = EMULATE_DONE;
	u32 cache, op_inst, op, base;
	s16 offset;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	unsigned long va, curr_pc;

	/*
	 * Update PC and hold onto current PC in case there is
	 * an error and we want to rollback the PC
	 */
	curr_pc = vcpu->arch.pc;
	er = update_pc(vcpu, cause);
	if (er == EMULATE_FAIL)
		return er;

	base = inst.i_format.rs;
	op_inst = inst.i_format.rt;
	if (cpu_has_mips_r6)
		offset = inst.spec3_format.simmediate;
	else
		offset = inst.i_format.simmediate;
	cache = op_inst & CacheOp_Cache;
	op = op_inst & CacheOp_Op;

	va = arch->gprs[base] + offset;

	kvm_debug("CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
		  cache, op, base, arch->gprs[base], offset);

	/* Secondary or tertiary cache ops ignored */
	if (cache != Cache_I && cache != Cache_D)
		return EMULATE_DONE;

	switch (op_inst) {
	case Index_Invalidate_I:
		flush_icache_line_indexed(va);
		return EMULATE_DONE;
	case Index_Writeback_Inv_D:
		flush_dcache_line_indexed(va);
		return EMULATE_DONE;
	default:
		break;
	}

	kvm_err("@ %#lx/%#lx CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
		curr_pc, vcpu->arch.gprs[31], cache, op, base, arch->gprs[base],
		offset);
	/* Rollback PC */
	vcpu->arch.pc = curr_pc;

	return EMULATE_FAIL;
}

static enum emulation_result kvm_trap_vz_handle_gpsi(u32 cause, u32 *opc,
						     struct kvm_vcpu *vcpu)
{
	enum emulation_result er = EMULATE_DONE;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	struct kvm_run *run = vcpu->run;
	union mips_instruction inst;
	int rd, rt, sel;
	int err;

	/*
	 * Fetch the instruction.
	 */
	if (cause & CAUSEF_BD)
		opc += 1;
	err = kvm_get_badinstr(opc, vcpu, &inst.word);
	if (err)
		return EMULATE_FAIL;

	switch (inst.r_format.opcode) {
	case cop0_op:
		er = kvm_vz_gpsi_cop0(inst, opc, cause, run, vcpu);
		break;
#ifndef CONFIG_CPU_MIPSR6
	case cache_op:
		trace_kvm_exit(vcpu, KVM_TRACE_EXIT_CACHE);
		er = kvm_vz_gpsi_cache(inst, opc, cause, run, vcpu);
		break;
#endif
	case spec3_op:
		switch (inst.spec3_format.func) {
#ifdef CONFIG_CPU_MIPSR6
		case cache6_op:
			trace_kvm_exit(vcpu, KVM_TRACE_EXIT_CACHE);
			er = kvm_vz_gpsi_cache(inst, opc, cause, run, vcpu);
			break;
#endif
		case rdhwr_op:
			if (inst.r_format.rs || (inst.r_format.re >> 3))
				goto unknown;

			rd = inst.r_format.rd;
			rt = inst.r_format.rt;
			sel = inst.r_format.re & 0x7;

			switch (rd) {
			case MIPS_HWR_CC:	/* Read count register */
				arch->gprs[rt] =
					(long)(int)kvm_mips_read_count(vcpu);
				break;
			default:
				trace_kvm_hwr(vcpu, KVM_TRACE_RDHWR,
					      KVM_TRACE_HWR(rd, sel), 0);
				goto unknown;
			}

			trace_kvm_hwr(vcpu, KVM_TRACE_RDHWR,
				      KVM_TRACE_HWR(rd, sel), arch->gprs[rt]);

			er = update_pc(vcpu, cause);
			break;
		default:
			goto unknown;
		}
		break;
unknown:

	default:
		kvm_err("GPSI exception not supported (%p/%#x)\n",
			opc, inst.word);
		kvm_arch_vcpu_dump_regs(vcpu);
		er = EMULATE_FAIL;
		break;
	}

	return er;
}
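
/*
 * Illustrative GPSI flows for the handlers above (comment only):
 *
 *  - "mfc0 t0, c0_count": kvm_vz_gpsi_cop0() advances the guest PC, reads
 *    the virtualized counter via kvm_mips_read_count(), sign-extends it
 *    for MFC0, and stores the result in guest t0.
 *
 *  - "cache 0x01, 0(a0)" (Index_Writeback_Inv_D): kvm_vz_gpsi_cache()
 *    decodes cache = Cache_D, op = Index_Writeback_Inv and performs
 *    flush_dcache_line_indexed(a0 + 0) on the guest's behalf.  Hit ops
 *    normally stay in the guest (GuestCtl0.CG), so it is mostly index ops
 *    that trap here when GuestCtl0Ext.CGI is unavailable.
 */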

static enum emulation_result kvm_trap_vz_handle_gsfc(u32 cause, u32 *opc,
						     struct kvm_vcpu *vcpu)
{
	enum emulation_result er = EMULATE_DONE;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	union mips_instruction inst;
	int err;

	/*
	 * Fetch the instruction.
	 */
	if (cause & CAUSEF_BD)
		opc += 1;
	err = kvm_get_badinstr(opc, vcpu, &inst.word);
	if (err)
		return EMULATE_FAIL;

	/* complete MTC0 on behalf of guest and advance EPC */
	if (inst.c0r_format.opcode == cop0_op &&
	    inst.c0r_format.rs == mtc_op &&
	    inst.c0r_format.z == 0) {
		int rt = inst.c0r_format.rt;
		int rd = inst.c0r_format.rd;
		int sel = inst.c0r_format.sel;
		unsigned int val = arch->gprs[rt];
		unsigned int old_val, change;

		trace_kvm_hwr(vcpu, KVM_TRACE_MTC0, KVM_TRACE_COP0(rd, sel),
			      val);

		if ((rd == MIPS_CP0_STATUS) && (sel == 0)) {
			/* FR bit should read as zero if no FPU */
			if (!kvm_mips_guest_has_fpu(&vcpu->arch))
				val &= ~(ST0_CU1 | ST0_FR);

			/*
			 * Also don't allow FR to be set if host doesn't support
			 * it.
			 */
			if (!(boot_cpu_data.fpu_id & MIPS_FPIR_F64))
				val &= ~ST0_FR;

			old_val = read_gc0_status();
			change = val ^ old_val;

			if (change & ST0_FR) {
				/*
				 * FPU and Vector register state is made
				 * UNPREDICTABLE by a change of FR, so don't
				 * even bother saving it.
				 */
				kvm_drop_fpu(vcpu);
			}

			/*
			 * If MSA state is already live, it is undefined how it
			 * interacts with FR=0 FPU state, and we don't want to
			 * hit reserved instruction exceptions trying to save
			 * the MSA state later when CU=1 && FR=1, so play it
			 * safe and save it first.
			 */
			if (change & ST0_CU1 && !(val & ST0_FR) &&
			    vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA)
				kvm_lose_fpu(vcpu);

			write_gc0_status(val);
		} else if ((rd == MIPS_CP0_CAUSE) && (sel == 0)) {
			u32 old_cause = read_gc0_cause();
			u32 change = old_cause ^ val;

			/* DC bit enabling/disabling timer? */
			if (change & CAUSEF_DC) {
				if (val & CAUSEF_DC)
					kvm_mips_count_disable_cause(vcpu);
				else
					kvm_mips_count_enable_cause(vcpu);
			}

			/* Only certain bits are RW to the guest */
			change &= (CAUSEF_DC | CAUSEF_IV | CAUSEF_WP |
				   CAUSEF_IP0 | CAUSEF_IP1);

			/*
			 * WP can only be cleared, never set, by the guest:
			 * keep the WP change bit only if WP was already set.
			 */
			change &= ~CAUSEF_WP | old_cause;

			write_gc0_cause(old_cause ^ change);
		} else if ((rd == MIPS_CP0_STATUS) && (sel == 1)) { /* IntCtl */
			write_gc0_intctl(val);
		} else if ((rd == MIPS_CP0_CONFIG) && (sel == 5)) {
			old_val = read_gc0_config5();
			change = val ^ old_val;
			/* Handle changes in FPU/MSA modes */
			preempt_disable();

			/*
			 * Propagate FRE changes immediately if the FPU
			 * context is already loaded.
			 */
			if (change & MIPS_CONF5_FRE &&
			    vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU)
				change_c0_config5(MIPS_CONF5_FRE, val);

			preempt_enable();

			val = old_val ^
				(change & kvm_vz_config5_guest_wrmask(vcpu));
			write_gc0_config5(val);
		} else {
			kvm_err("Handle GSFC, unsupported field change @ %p: %#x\n",
				opc, inst.word);
			er = EMULATE_FAIL;
		}

		if (er != EMULATE_FAIL)
			er = update_pc(vcpu, cause);
	} else {
		kvm_err("Handle GSFC, unrecognized instruction @ %p: %#x\n",
			opc, inst.word);
		er = EMULATE_FAIL;
	}

	return er;
}

static enum emulation_result kvm_trap_vz_handle_hc(u32 cause, u32 *opc,
						   struct kvm_vcpu *vcpu)
{
	enum emulation_result er;
	union mips_instruction inst;
	unsigned long curr_pc;
	int err;

	if (cause & CAUSEF_BD)
		opc += 1;
	err = kvm_get_badinstr(opc, vcpu, &inst.word);
	if (err)
		return EMULATE_FAIL;

	/*
	 * Update PC and hold onto current PC in case there is
	 * an error and we want to rollback the PC
	 */
	curr_pc = vcpu->arch.pc;
	er = update_pc(vcpu, cause);
	if (er == EMULATE_FAIL)
		return er;

	er = kvm_mips_emul_hypcall(vcpu, inst);
	if (er == EMULATE_FAIL)
		vcpu->arch.pc = curr_pc;

	return er;
}
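
/*
 * Worked example for the Cause.WP masking in kvm_trap_vz_handle_gsfc()
 * above (illustrative): with old_cause.WP = 0 and a guest write of WP = 1,
 * the WP change bit is dropped (~CAUSEF_WP clears it and old_cause
 * contributes nothing), so the write is ignored.  With old_cause.WP = 1
 * and a guest write of 0, old_cause keeps the change bit alive and the
 * final XOR clears WP.  The guest can therefore clear, but never set, the
 * Watch Pending flag.
 */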

static enum emulation_result kvm_trap_vz_no_handler_guest_exit(u32 gexccode,
							       u32 cause,
							       u32 *opc,
							       struct kvm_vcpu *vcpu)
{
	u32 inst;

	/*
	 * Fetch the instruction.
	 */
	if (cause & CAUSEF_BD)
		opc += 1;
	kvm_get_badinstr(opc, vcpu, &inst);

	kvm_err("Guest Exception Code: %d not yet handled @ PC: %p, inst: 0x%08x Status: %#x\n",
		gexccode, opc, inst, read_gc0_status());

	return EMULATE_FAIL;
}

static int kvm_trap_vz_handle_guest_exit(struct kvm_vcpu *vcpu)
{
	u32 *opc = (u32 *) vcpu->arch.pc;
	u32 cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	u32 gexccode = (vcpu->arch.host_cp0_guestctl0 &
			MIPS_GCTL0_GEXC) >> MIPS_GCTL0_GEXC_SHIFT;
	int ret = RESUME_GUEST;

	trace_kvm_exit(vcpu, KVM_TRACE_EXIT_GEXCCODE_BASE + gexccode);
	switch (gexccode) {
	case MIPS_GCTL0_GEXC_GPSI:
		++vcpu->stat.vz_gpsi_exits;
		er = kvm_trap_vz_handle_gpsi(cause, opc, vcpu);
		break;
	case MIPS_GCTL0_GEXC_GSFC:
		++vcpu->stat.vz_gsfc_exits;
		er = kvm_trap_vz_handle_gsfc(cause, opc, vcpu);
		break;
	case MIPS_GCTL0_GEXC_HC:
		++vcpu->stat.vz_hc_exits;
		er = kvm_trap_vz_handle_hc(cause, opc, vcpu);
		break;
	case MIPS_GCTL0_GEXC_GRR:
		++vcpu->stat.vz_grr_exits;
		er = kvm_trap_vz_no_handler_guest_exit(gexccode, cause, opc,
						       vcpu);
		break;
	case MIPS_GCTL0_GEXC_GVA:
		++vcpu->stat.vz_gva_exits;
		er = kvm_trap_vz_no_handler_guest_exit(gexccode, cause, opc,
						       vcpu);
		break;
	case MIPS_GCTL0_GEXC_GHFC:
		++vcpu->stat.vz_ghfc_exits;
		er = kvm_trap_vz_no_handler_guest_exit(gexccode, cause, opc,
						       vcpu);
		break;
	case MIPS_GCTL0_GEXC_GPA:
		++vcpu->stat.vz_gpa_exits;
		er = kvm_trap_vz_no_handler_guest_exit(gexccode, cause, opc,
						       vcpu);
		break;
	default:
		++vcpu->stat.vz_resvd_exits;
		er = kvm_trap_vz_no_handler_guest_exit(gexccode, cause, opc,
						       vcpu);
		break;
	}

	if (er == EMULATE_DONE) {
		ret = RESUME_GUEST;
	} else if (er == EMULATE_HYPERCALL) {
		ret = kvm_mips_handle_hypcall(vcpu);
	} else {
		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}

/**
 * kvm_trap_vz_handle_cop_unusable() - Guest used unusable coprocessor.
 * @vcpu:	Virtual CPU context.
 *
 * Handle when the guest attempts to use a coprocessor which hasn't been
 * allowed by the root context.
 */
static int kvm_trap_vz_handle_cop_unusable(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	u32 cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_FAIL;
	int ret = RESUME_GUEST;

	if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 1) {
		/*
		 * If guest FPU not present, the FPU operation should have been
		 * treated as a reserved instruction!
		 * If FPU already in use, we shouldn't get this at all.
		 */
		if (WARN_ON(!kvm_mips_guest_has_fpu(&vcpu->arch) ||
			    vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU)) {
			preempt_enable();
			return EMULATE_FAIL;
		}

		kvm_own_fpu(vcpu);
		er = EMULATE_DONE;
	}
	/* other coprocessors not handled */

	switch (er) {
	case EMULATE_DONE:
		ret = RESUME_GUEST;
		break;

	case EMULATE_FAIL:
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
		break;

	default:
		BUG();
	}
	return ret;
}

/**
 * kvm_trap_vz_handle_msa_disabled() - Guest used MSA while disabled in root.
 * @vcpu:	Virtual CPU context.
 *
 * Handle when the guest attempts to use MSA when it is disabled in the root
 * context.
 */
static int kvm_trap_vz_handle_msa_disabled(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;

	/*
	 * If MSA not present or not exposed to guest or FR=0, the MSA
	 * operation should have been treated as a reserved instruction!
	 * Same if CU1=1, FR=0.
	 * If MSA already in use, we shouldn't get this at all.
	 */
	if (!kvm_mips_guest_has_msa(&vcpu->arch) ||
	    (read_gc0_status() & (ST0_CU1 | ST0_FR)) == ST0_CU1 ||
	    !(read_gc0_config5() & MIPS_CONF5_MSAEN) ||
	    vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) {
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		return RESUME_HOST;
	}

	kvm_own_msa(vcpu);

	return RESUME_GUEST;
}

static int kvm_trap_vz_handle_tlb_ld_miss(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	u32 *opc = (u32 *) vcpu->arch.pc;
	u32 cause = vcpu->arch.host_cp0_cause;
	ulong badvaddr = vcpu->arch.host_cp0_badvaddr;
	union mips_instruction inst;
	enum emulation_result er = EMULATE_DONE;
	int err, ret = RESUME_GUEST;

	if (kvm_mips_handle_vz_root_tlb_fault(badvaddr, vcpu, false)) {
		/* A code fetch fault doesn't count as an MMIO */
		if (kvm_is_ifetch_fault(&vcpu->arch)) {
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			return RESUME_HOST;
		}

		/* Fetch the instruction */
		if (cause & CAUSEF_BD)
			opc += 1;
		err = kvm_get_badinstr(opc, vcpu, &inst.word);
		if (err) {
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			return RESUME_HOST;
		}

		/* Treat as MMIO */
		er = kvm_mips_emulate_load(inst, cause, run, vcpu);
		if (er == EMULATE_FAIL) {
			kvm_err("Guest Emulate Load from MMIO space failed: PC: %p, BadVaddr: %#lx\n",
				opc, badvaddr);
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		}
	}

	if (er == EMULATE_DONE) {
		ret = RESUME_GUEST;
	} else if (er == EMULATE_DO_MMIO) {
		run->exit_reason = KVM_EXIT_MMIO;
		ret = RESUME_HOST;
	} else {
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}
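
/*
 * Illustrative flow (comment only): a guest "lw t0, 0(s0)" targeting a GPA
 * with no memslot backing takes a root TLB exception;
 * kvm_mips_handle_vz_root_tlb_fault() fails to map it and the access is
 * treated as MMIO: kvm_mips_emulate_load() records the pending load and
 * the VCPU exits to userspace with KVM_EXIT_MMIO, so e.g. QEMU can emulate
 * the device and complete the access on the next KVM_RUN.
 */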

static int kvm_trap_vz_handle_tlb_st_miss(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	u32 *opc = (u32 *) vcpu->arch.pc;
	u32 cause = vcpu->arch.host_cp0_cause;
	ulong badvaddr = vcpu->arch.host_cp0_badvaddr;
	union mips_instruction inst;
	enum emulation_result er = EMULATE_DONE;
	int err;
	int ret = RESUME_GUEST;

	/* Just try the access again if we couldn't do the translation */
	if (kvm_vz_badvaddr_to_gpa(vcpu, badvaddr, &badvaddr))
		return RESUME_GUEST;
	vcpu->arch.host_cp0_badvaddr = badvaddr;

	if (kvm_mips_handle_vz_root_tlb_fault(badvaddr, vcpu, true)) {
		/* Fetch the instruction */
		if (cause & CAUSEF_BD)
			opc += 1;
		err = kvm_get_badinstr(opc, vcpu, &inst.word);
		if (err) {
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			return RESUME_HOST;
		}

		/* Treat as MMIO */
		er = kvm_mips_emulate_store(inst, cause, run, vcpu);
		if (er == EMULATE_FAIL) {
			kvm_err("Guest Emulate Store to MMIO space failed: PC: %p, BadVaddr: %#lx\n",
				opc, badvaddr);
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		}
	}

	if (er == EMULATE_DONE) {
		ret = RESUME_GUEST;
	} else if (er == EMULATE_DO_MMIO) {
		run->exit_reason = KVM_EXIT_MMIO;
		ret = RESUME_HOST;
	} else {
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}

static u64 kvm_vz_get_one_regs[] = {
	KVM_REG_MIPS_CP0_INDEX,
	KVM_REG_MIPS_CP0_ENTRYLO0,
	KVM_REG_MIPS_CP0_ENTRYLO1,
	KVM_REG_MIPS_CP0_CONTEXT,
	KVM_REG_MIPS_CP0_PAGEMASK,
	KVM_REG_MIPS_CP0_PAGEGRAIN,
	KVM_REG_MIPS_CP0_WIRED,
	KVM_REG_MIPS_CP0_HWRENA,
	KVM_REG_MIPS_CP0_BADVADDR,
	KVM_REG_MIPS_CP0_COUNT,
	KVM_REG_MIPS_CP0_ENTRYHI,
	KVM_REG_MIPS_CP0_COMPARE,
	KVM_REG_MIPS_CP0_STATUS,
	KVM_REG_MIPS_CP0_INTCTL,
	KVM_REG_MIPS_CP0_CAUSE,
	KVM_REG_MIPS_CP0_EPC,
	KVM_REG_MIPS_CP0_PRID,
	KVM_REG_MIPS_CP0_EBASE,
	KVM_REG_MIPS_CP0_CONFIG,
	KVM_REG_MIPS_CP0_CONFIG1,
	KVM_REG_MIPS_CP0_CONFIG2,
	KVM_REG_MIPS_CP0_CONFIG3,
	KVM_REG_MIPS_CP0_CONFIG4,
	KVM_REG_MIPS_CP0_CONFIG5,
#ifdef CONFIG_64BIT
	KVM_REG_MIPS_CP0_XCONTEXT,
#endif
	KVM_REG_MIPS_CP0_ERROREPC,

	KVM_REG_MIPS_COUNT_CTL,
	KVM_REG_MIPS_COUNT_RESUME,
	KVM_REG_MIPS_COUNT_HZ,
};

static u64 kvm_vz_get_one_regs_kscratch[] = {
	KVM_REG_MIPS_CP0_KSCRATCH1,
	KVM_REG_MIPS_CP0_KSCRATCH2,
	KVM_REG_MIPS_CP0_KSCRATCH3,
	KVM_REG_MIPS_CP0_KSCRATCH4,
	KVM_REG_MIPS_CP0_KSCRATCH5,
	KVM_REG_MIPS_CP0_KSCRATCH6,
};

static unsigned long kvm_vz_num_regs(struct kvm_vcpu *vcpu)
{
	unsigned long ret;

	ret = ARRAY_SIZE(kvm_vz_get_one_regs);
	if (cpu_guest_has_userlocal)
		++ret;
	ret += __arch_hweight8(cpu_data[0].guest.kscratch_mask);

	return ret;
}

static int kvm_vz_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices)
{
	u64 index;
	unsigned int i;

	if (copy_to_user(indices, kvm_vz_get_one_regs,
			 sizeof(kvm_vz_get_one_regs)))
		return -EFAULT;
	indices += ARRAY_SIZE(kvm_vz_get_one_regs);

	if (cpu_guest_has_userlocal) {
		index = KVM_REG_MIPS_CP0_USERLOCAL;
		if (copy_to_user(indices, &index, sizeof(index)))
			return -EFAULT;
		++indices;
	}
	for (i = 0; i < 6; ++i) {
		if (!cpu_guest_has_kscr(i + 2))
			continue;

		if (copy_to_user(indices, &kvm_vz_get_one_regs_kscratch[i],
				 sizeof(kvm_vz_get_one_regs_kscratch[i])))
			return -EFAULT;
		++indices;
	}

	return 0;
}

static inline s64 entrylo_kvm_to_user(unsigned long v)
{
	s64 mask, ret = v;

	if (BITS_PER_LONG == 32) {
		/*
		 * KVM API exposes 64-bit version of the register, so move the
		 * RI/XI bits up into place.
		 */
		mask = MIPS_ENTRYLO_RI | MIPS_ENTRYLO_XI;
		ret &= ~mask;
		ret |= ((s64)v & mask) << 32;
	}
	return ret;
}
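
/*
 * Worked example (illustrative, 32-bit host with RIXI): the hardware
 * EntryLo keeps RI/XI in bits 31:30, while the 64-bit KVM register ABI
 * keeps them in bits 63:62.  A kernel value of 0x80000007 (RI set, plus
 * the low D/V/G bits) therefore becomes 0x8000000000000007 for userspace;
 * entrylo_user_to_kvm() below performs the inverse shift.
 */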

static inline unsigned long entrylo_user_to_kvm(s64 v)
{
	unsigned long mask, ret = v;

	if (BITS_PER_LONG == 32) {
		/*
		 * KVM API exposes 64-bit version of the register, so move the
		 * RI/XI bits down into place.
		 */
		mask = MIPS_ENTRYLO_RI | MIPS_ENTRYLO_XI;
		ret &= ~mask;
		ret |= (v >> 32) & mask;
	}
	return ret;
}

static int kvm_vz_get_one_reg(struct kvm_vcpu *vcpu,
			      const struct kvm_one_reg *reg,
			      s64 *v)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	unsigned int idx;

	switch (reg->id) {
	case KVM_REG_MIPS_CP0_INDEX:
		*v = (long)read_gc0_index();
		break;
	case KVM_REG_MIPS_CP0_ENTRYLO0:
		*v = entrylo_kvm_to_user(read_gc0_entrylo0());
		break;
	case KVM_REG_MIPS_CP0_ENTRYLO1:
		*v = entrylo_kvm_to_user(read_gc0_entrylo1());
		break;
	case KVM_REG_MIPS_CP0_CONTEXT:
		*v = (long)read_gc0_context();
		break;
	case KVM_REG_MIPS_CP0_USERLOCAL:
		if (!cpu_guest_has_userlocal)
			return -EINVAL;
		*v = read_gc0_userlocal();
		break;
	case KVM_REG_MIPS_CP0_PAGEMASK:
		*v = (long)read_gc0_pagemask();
		break;
	case KVM_REG_MIPS_CP0_PAGEGRAIN:
		*v = (long)read_gc0_pagegrain();
		break;
	case KVM_REG_MIPS_CP0_WIRED:
		*v = (long)read_gc0_wired();
		break;
	case KVM_REG_MIPS_CP0_HWRENA:
		*v = (long)read_gc0_hwrena();
		break;
	case KVM_REG_MIPS_CP0_BADVADDR:
		*v = (long)read_gc0_badvaddr();
		break;
	case KVM_REG_MIPS_CP0_COUNT:
		*v = kvm_mips_read_count(vcpu);
		break;
	case KVM_REG_MIPS_CP0_ENTRYHI:
		*v = (long)read_gc0_entryhi();
		break;
	case KVM_REG_MIPS_CP0_COMPARE:
		*v = (long)read_gc0_compare();
		break;
	case KVM_REG_MIPS_CP0_STATUS:
		*v = (long)read_gc0_status();
		break;
	case KVM_REG_MIPS_CP0_INTCTL:
		*v = read_gc0_intctl();
		break;
	case KVM_REG_MIPS_CP0_CAUSE:
		*v = (long)read_gc0_cause();
		break;
	case KVM_REG_MIPS_CP0_EPC:
		*v = (long)read_gc0_epc();
		break;
	case KVM_REG_MIPS_CP0_PRID:
		*v = (long)kvm_read_c0_guest_prid(cop0);
		break;
	case KVM_REG_MIPS_CP0_EBASE:
		*v = kvm_vz_read_gc0_ebase();
		break;
	case KVM_REG_MIPS_CP0_CONFIG:
		*v = read_gc0_config();
		break;
	case KVM_REG_MIPS_CP0_CONFIG1:
		if (!cpu_guest_has_conf1)
			return -EINVAL;
		*v = read_gc0_config1();
		break;
	case KVM_REG_MIPS_CP0_CONFIG2:
		if (!cpu_guest_has_conf2)
			return -EINVAL;
		*v = read_gc0_config2();
		break;
	case KVM_REG_MIPS_CP0_CONFIG3:
		if (!cpu_guest_has_conf3)
			return -EINVAL;
		*v = read_gc0_config3();
		break;
	case KVM_REG_MIPS_CP0_CONFIG4:
		if (!cpu_guest_has_conf4)
			return -EINVAL;
		*v = read_gc0_config4();
		break;
	case KVM_REG_MIPS_CP0_CONFIG5:
		if (!cpu_guest_has_conf5)
			return -EINVAL;
		*v = read_gc0_config5();
		break;
#ifdef CONFIG_64BIT
	case KVM_REG_MIPS_CP0_XCONTEXT:
		*v = read_gc0_xcontext();
		break;
#endif
	case KVM_REG_MIPS_CP0_ERROREPC:
		*v = (long)read_gc0_errorepc();
		break;
	case KVM_REG_MIPS_CP0_KSCRATCH1 ... KVM_REG_MIPS_CP0_KSCRATCH6:
		idx = reg->id - KVM_REG_MIPS_CP0_KSCRATCH1 + 2;
		if (!cpu_guest_has_kscr(idx))
			return -EINVAL;
		switch (idx) {
		case 2:
			*v = (long)read_gc0_kscratch1();
			break;
		case 3:
			*v = (long)read_gc0_kscratch2();
			break;
		case 4:
			*v = (long)read_gc0_kscratch3();
			break;
		case 5:
			*v = (long)read_gc0_kscratch4();
			break;
		case 6:
			*v = (long)read_gc0_kscratch5();
			break;
		case 7:
			*v = (long)read_gc0_kscratch6();
			break;
		}
		break;
	case KVM_REG_MIPS_COUNT_CTL:
		*v = vcpu->arch.count_ctl;
		break;
	case KVM_REG_MIPS_COUNT_RESUME:
		*v = ktime_to_ns(vcpu->arch.count_resume);
		break;
	case KVM_REG_MIPS_COUNT_HZ:
		*v = vcpu->arch.count_hz;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
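
/*
 * Illustrative userspace usage (comment only, not part of this file):
 * reading the guest CP0_Count through the one-reg API reaches
 * kvm_vz_get_one_reg() above.
 *
 *	__u64 val;
 *	struct kvm_one_reg reg = {
 *		.id   = KVM_REG_MIPS_CP0_COUNT,
 *		.addr = (__u64)(unsigned long)&val,
 *	};
 *	ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
 */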

static int kvm_vz_set_one_reg(struct kvm_vcpu *vcpu,
			      const struct kvm_one_reg *reg,
			      s64 v)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	unsigned int idx;
	int ret = 0;
	unsigned int cur, change;

	switch (reg->id) {
	case KVM_REG_MIPS_CP0_INDEX:
		write_gc0_index(v);
		break;
	case KVM_REG_MIPS_CP0_ENTRYLO0:
		write_gc0_entrylo0(entrylo_user_to_kvm(v));
		break;
	case KVM_REG_MIPS_CP0_ENTRYLO1:
		write_gc0_entrylo1(entrylo_user_to_kvm(v));
		break;
	case KVM_REG_MIPS_CP0_CONTEXT:
		write_gc0_context(v);
		break;
	case KVM_REG_MIPS_CP0_USERLOCAL:
		if (!cpu_guest_has_userlocal)
			return -EINVAL;
		write_gc0_userlocal(v);
		break;
	case KVM_REG_MIPS_CP0_PAGEMASK:
		write_gc0_pagemask(v);
		break;
	case KVM_REG_MIPS_CP0_PAGEGRAIN:
		write_gc0_pagegrain(v);
		break;
	case KVM_REG_MIPS_CP0_WIRED:
		change_gc0_wired(MIPSR6_WIRED_WIRED, v);
		break;
	case KVM_REG_MIPS_CP0_HWRENA:
		write_gc0_hwrena(v);
		break;
	case KVM_REG_MIPS_CP0_BADVADDR:
		write_gc0_badvaddr(v);
		break;
	case KVM_REG_MIPS_CP0_COUNT:
		kvm_mips_write_count(vcpu, v);
		break;
	case KVM_REG_MIPS_CP0_ENTRYHI:
		write_gc0_entryhi(v);
		break;
	case KVM_REG_MIPS_CP0_COMPARE:
		kvm_mips_write_compare(vcpu, v, false);
		break;
	case KVM_REG_MIPS_CP0_STATUS:
		write_gc0_status(v);
		break;
	case KVM_REG_MIPS_CP0_INTCTL:
		write_gc0_intctl(v);
		break;
	case KVM_REG_MIPS_CP0_CAUSE:
		/*
		 * If the timer is stopped or started (DC bit) it must look
		 * atomic with changes to the timer interrupt pending bit (TI).
		 * A timer interrupt should not happen in between.
		 */
		if ((read_gc0_cause() ^ v) & CAUSEF_DC) {
			if (v & CAUSEF_DC) {
				/* disable timer first */
				kvm_mips_count_disable_cause(vcpu);
				change_gc0_cause((u32)~CAUSEF_DC, v);
			} else {
				/* enable timer last */
				change_gc0_cause((u32)~CAUSEF_DC, v);
				kvm_mips_count_enable_cause(vcpu);
			}
		} else {
			write_gc0_cause(v);
		}
		break;
	case KVM_REG_MIPS_CP0_EPC:
		write_gc0_epc(v);
		break;
	case KVM_REG_MIPS_CP0_PRID:
		kvm_write_c0_guest_prid(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_EBASE:
		kvm_vz_write_gc0_ebase(v);
		break;
	case KVM_REG_MIPS_CP0_CONFIG:
		cur = read_gc0_config();
		change = (cur ^ v) & kvm_vz_config_user_wrmask(vcpu);
		if (change) {
			v = cur ^ change;
			write_gc0_config(v);
		}
		break;
	case KVM_REG_MIPS_CP0_CONFIG1:
		if (!cpu_guest_has_conf1)
			break;
		cur = read_gc0_config1();
		change = (cur ^ v) & kvm_vz_config1_user_wrmask(vcpu);
		if (change) {
			v = cur ^ change;
			write_gc0_config1(v);
		}
		break;
	case KVM_REG_MIPS_CP0_CONFIG2:
		if (!cpu_guest_has_conf2)
			break;
		cur = read_gc0_config2();
		change = (cur ^ v) & kvm_vz_config2_user_wrmask(vcpu);
		if (change) {
			v = cur ^ change;
			write_gc0_config2(v);
		}
		break;
	case KVM_REG_MIPS_CP0_CONFIG3:
		if (!cpu_guest_has_conf3)
			break;
		cur = read_gc0_config3();
		change = (cur ^ v) & kvm_vz_config3_user_wrmask(vcpu);
		if (change) {
			v = cur ^ change;
			write_gc0_config3(v);
		}
		break;
	case KVM_REG_MIPS_CP0_CONFIG4:
		if (!cpu_guest_has_conf4)
			break;
		cur = read_gc0_config4();
		change = (cur ^ v) & kvm_vz_config4_user_wrmask(vcpu);
		if (change) {
			v = cur ^ change;
			write_gc0_config4(v);
		}
		break;
	case KVM_REG_MIPS_CP0_CONFIG5:
		if (!cpu_guest_has_conf5)
			break;
		cur = read_gc0_config5();
		change = (cur ^ v) & kvm_vz_config5_user_wrmask(vcpu);
		if (change) {
			v = cur ^ change;
			write_gc0_config5(v);
		}
		break;
#ifdef CONFIG_64BIT
	case KVM_REG_MIPS_CP0_XCONTEXT:
		write_gc0_xcontext(v);
		break;
#endif
	case KVM_REG_MIPS_CP0_ERROREPC:
		write_gc0_errorepc(v);
		break;
	case KVM_REG_MIPS_CP0_KSCRATCH1 ... KVM_REG_MIPS_CP0_KSCRATCH6:
		idx = reg->id - KVM_REG_MIPS_CP0_KSCRATCH1 + 2;
		if (!cpu_guest_has_kscr(idx))
			return -EINVAL;
		switch (idx) {
		case 2:
			write_gc0_kscratch1(v);
			break;
		case 3:
			write_gc0_kscratch2(v);
			break;
		case 4:
			write_gc0_kscratch3(v);
			break;
		case 5:
			write_gc0_kscratch4(v);
			break;
		case 6:
			write_gc0_kscratch5(v);
			break;
		case 7:
			write_gc0_kscratch6(v);
			break;
		}
		break;
	case KVM_REG_MIPS_COUNT_CTL:
		ret = kvm_mips_set_count_ctl(vcpu, v);
		break;
	case KVM_REG_MIPS_COUNT_RESUME:
		ret = kvm_mips_set_count_resume(vcpu, v);
		break;
	case KVM_REG_MIPS_COUNT_HZ:
		ret = kvm_mips_set_count_hz(vcpu, v);
		break;
	default:
		return -EINVAL;
	}
	return ret;
}

#define guestid_cache(cpu)	(cpu_data[cpu].guestid_cache)
static void kvm_vz_get_new_guestid(unsigned long cpu, struct kvm_vcpu *vcpu)
{
	unsigned long guestid = guestid_cache(cpu);

	if (!(++guestid & GUESTID_MASK)) {
		if (cpu_has_vtag_icache)
			flush_icache_all();

		if (!guestid)		/* fix version if needed */
			guestid = GUESTID_FIRST_VERSION;

		++guestid;		/* guestid 0 reserved for root */

		/* start new guestid cycle */
		kvm_vz_local_flush_roottlb_all_guests();
		kvm_vz_local_flush_guesttlb_all();
	}

	guestid_cache(cpu) = guestid;
}
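
/*
 * Worked example for kvm_vz_get_new_guestid() above (illustrative,
 * assuming an 8-bit GuestID field so GUESTID_MASK = 0xff and
 * GUESTID_FIRST_VERSION = 0x100): incrementing a cache value of 0x1ff
 * gives 0x200, whose ID bits (0x200 & 0xff) are zero, so a new "version"
 * begins.  The ID is bumped past the reserved root GuestID 0 (yielding
 * 0x201) and both the guest TLB and the guest-tagged root TLB entries are
 * flushed, invalidating every stale GuestID at once.
 */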

/* Returns 1 if the guest TLB may be clobbered */
static int kvm_vz_check_requests(struct kvm_vcpu *vcpu, int cpu)
{
	int ret = 0;
	int i;

	if (!vcpu->requests)
		return 0;

	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
		if (cpu_has_guestid) {
			/* Drop all GuestIDs for this VCPU */
			for_each_possible_cpu(i)
				vcpu->arch.vzguestid[i] = 0;
			/* This will clobber guest TLB contents too */
			ret = 1;
		}
		/*
		 * For Root ASID Dealias (RAD) we don't do anything here, but we
		 * still need the request to ensure we recheck asid_flush_mask.
		 * We can still return 0 as only the root TLB will be affected
		 * by a root ASID flush.
		 */
	}

	return ret;
}

static void kvm_vz_vcpu_save_wired(struct kvm_vcpu *vcpu)
{
	unsigned int wired = read_gc0_wired();
	struct kvm_mips_tlb *tlbs;
	int i;

	/* Expand the wired TLB array if necessary */
	wired &= MIPSR6_WIRED_WIRED;
	if (wired > vcpu->arch.wired_tlb_limit) {
		tlbs = krealloc(vcpu->arch.wired_tlb, wired *
				sizeof(*vcpu->arch.wired_tlb), GFP_ATOMIC);
		if (WARN_ON(!tlbs)) {
			/* Save whatever we can */
			wired = vcpu->arch.wired_tlb_limit;
		} else {
			vcpu->arch.wired_tlb = tlbs;
			vcpu->arch.wired_tlb_limit = wired;
		}
	}

	if (wired)
		/* Save wired entries from the guest TLB */
		kvm_vz_save_guesttlb(vcpu->arch.wired_tlb, 0, wired);
	/* Invalidate any dropped entries since last time */
	for (i = wired; i < vcpu->arch.wired_tlb_used; ++i) {
		vcpu->arch.wired_tlb[i].tlb_hi = UNIQUE_GUEST_ENTRYHI(i);
		vcpu->arch.wired_tlb[i].tlb_lo[0] = 0;
		vcpu->arch.wired_tlb[i].tlb_lo[1] = 0;
		vcpu->arch.wired_tlb[i].tlb_mask = 0;
	}
	vcpu->arch.wired_tlb_used = wired;
}

static void kvm_vz_vcpu_load_wired(struct kvm_vcpu *vcpu)
{
	/* Load wired entries into the guest TLB */
	if (vcpu->arch.wired_tlb)
		kvm_vz_load_guesttlb(vcpu->arch.wired_tlb, 0,
				     vcpu->arch.wired_tlb_used);
}
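
/*
 * Illustrative sequence (comment only): with Guest.Wired.Wired = 3, a
 * vcpu_put saves guest TLB entries 0..2 into vcpu->arch.wired_tlb[].  If a
 * later save finds only 2 wired entries, array index 2 is rewritten with a
 * unique non-matching EntryHi (UNIQUE_GUEST_ENTRYHI(2)) so the stale
 * mapping can never hit once the array is reloaded by
 * kvm_vz_vcpu_load_wired() above, possibly on another CPU.
 */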

static void kvm_vz_vcpu_load_tlb(struct kvm_vcpu *vcpu, int cpu)
{
	struct kvm *kvm = vcpu->kvm;
	struct mm_struct *gpa_mm = &kvm->arch.gpa_mm;
	bool migrated;

	/*
	 * Are we entering guest context on a different CPU to last time?
	 * If so, the VCPU's guest TLB state on this CPU may be stale.
	 */
	migrated = (vcpu->arch.last_exec_cpu != cpu);
	vcpu->arch.last_exec_cpu = cpu;

	/*
	 * A vcpu's GuestID is set in GuestCtl1.ID when the vcpu is loaded and
	 * remains set until another vcpu is loaded in. As a rule GuestRID
	 * remains zeroed when in root context unless the kernel is busy
	 * manipulating guest tlb entries.
	 */
	if (cpu_has_guestid) {
		/*
		 * Check if our GuestID is of an older version and thus invalid.
		 *
		 * We also discard the stored GuestID if we've executed on
		 * another CPU, as the guest mappings may have changed without
		 * hypervisor knowledge.
		 */
		if (migrated ||
		    (vcpu->arch.vzguestid[cpu] ^ guestid_cache(cpu)) &
					GUESTID_VERSION_MASK) {
			kvm_vz_get_new_guestid(cpu, vcpu);
			vcpu->arch.vzguestid[cpu] = guestid_cache(cpu);
			trace_kvm_guestid_change(vcpu,
						 vcpu->arch.vzguestid[cpu]);
		}

		/* Restore GuestID */
		change_c0_guestctl1(GUESTID_MASK, vcpu->arch.vzguestid[cpu]);
	} else {
		/*
		 * The Guest TLB only stores a single guest's TLB state, so
		 * flush it if another VCPU has executed on this CPU.
		 *
		 * We also flush if we've executed on another CPU, as the guest
		 * mappings may have changed without hypervisor knowledge.
		 */
		if (migrated || last_exec_vcpu[cpu] != vcpu)
			kvm_vz_local_flush_guesttlb_all();
		last_exec_vcpu[cpu] = vcpu;

		/*
		 * Root ASID dealiases guest GPA mappings in the root TLB.
		 * Allocate new root ASID if needed.
		 */
		if (cpumask_test_and_clear_cpu(cpu, &kvm->arch.asid_flush_mask)
		    || (cpu_context(cpu, gpa_mm) ^ asid_cache(cpu)) &
						asid_version_mask(cpu))
			get_new_mmu_context(gpa_mm, cpu);
	}
}

static int kvm_vz_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	bool migrated, all;

	/*
	 * Have we migrated to a different CPU?
	 * If so, any old guest TLB state may be stale.
	 */
	migrated = (vcpu->arch.last_sched_cpu != cpu);

	/*
	 * Was this the last VCPU to run on this CPU?
	 * If not, any old guest state from this VCPU will have been clobbered.
	 */
	all = migrated || (last_vcpu[cpu] != vcpu);
	last_vcpu[cpu] = vcpu;

	/*
	 * Restore CP0_Wired unconditionally as we clear it after use, and
	 * restore wired guest TLB entries (while in guest context).
	 */
	kvm_restore_gc0_wired(cop0);
	if (current->flags & PF_VCPU) {
		tlbw_use_hazard();
		kvm_vz_vcpu_load_tlb(vcpu, cpu);
		kvm_vz_vcpu_load_wired(vcpu);
	}

	/*
	 * Restore timer state regardless, as e.g. Cause.TI can change over
	 * time if left unmaintained.
	 */
	kvm_vz_restore_timer(vcpu);

	/* Don't bother restoring registers multiple times unless necessary */
	if (!all)
		return 0;

	/*
	 * Restore config registers first, as some implementations restrict
	 * writes to other registers when the corresponding feature bits aren't
	 * set. For example Status.CU1 cannot be set unless Config1.FP is set.
	 */
	kvm_restore_gc0_config(cop0);
	if (cpu_guest_has_conf1)
		kvm_restore_gc0_config1(cop0);
	if (cpu_guest_has_conf2)
		kvm_restore_gc0_config2(cop0);
	if (cpu_guest_has_conf3)
		kvm_restore_gc0_config3(cop0);
	if (cpu_guest_has_conf4)
		kvm_restore_gc0_config4(cop0);
	if (cpu_guest_has_conf5)
		kvm_restore_gc0_config5(cop0);
	if (cpu_guest_has_conf6)
		kvm_restore_gc0_config6(cop0);
	if (cpu_guest_has_conf7)
		kvm_restore_gc0_config7(cop0);

	kvm_restore_gc0_index(cop0);
	kvm_restore_gc0_entrylo0(cop0);
	kvm_restore_gc0_entrylo1(cop0);
	kvm_restore_gc0_context(cop0);
#ifdef CONFIG_64BIT
	kvm_restore_gc0_xcontext(cop0);
#endif
	kvm_restore_gc0_pagemask(cop0);
	kvm_restore_gc0_pagegrain(cop0);
	kvm_restore_gc0_hwrena(cop0);
	kvm_restore_gc0_badvaddr(cop0);
	kvm_restore_gc0_entryhi(cop0);
	kvm_restore_gc0_status(cop0);
	kvm_restore_gc0_intctl(cop0);
	kvm_restore_gc0_epc(cop0);
	kvm_vz_write_gc0_ebase(kvm_read_sw_gc0_ebase(cop0));
	if (cpu_guest_has_userlocal)
		kvm_restore_gc0_userlocal(cop0);

	kvm_restore_gc0_errorepc(cop0);

	/* restore KScratch registers if enabled in guest */
	if (cpu_guest_has_conf4) {
		if (cpu_guest_has_kscr(2))
			kvm_restore_gc0_kscratch1(cop0);
		if (cpu_guest_has_kscr(3))
			kvm_restore_gc0_kscratch2(cop0);
		if (cpu_guest_has_kscr(4))
			kvm_restore_gc0_kscratch3(cop0);
		if (cpu_guest_has_kscr(5))
			kvm_restore_gc0_kscratch4(cop0);
		if (cpu_guest_has_kscr(6))
			kvm_restore_gc0_kscratch5(cop0);
		if (cpu_guest_has_kscr(7))
			kvm_restore_gc0_kscratch6(cop0);
	}

	/* restore Root.GuestCtl2 from unused Guest guestctl2 register */
	if (cpu_has_guestctl2)
		write_c0_guestctl2(
			cop0->reg[MIPS_CP0_GUESTCTL2][MIPS_CP0_GUESTCTL2_SEL]);

	return 0;
}

static int kvm_vz_vcpu_put(struct kvm_vcpu *vcpu, int cpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;

	if (current->flags & PF_VCPU)
		kvm_vz_vcpu_save_wired(vcpu);

	kvm_lose_fpu(vcpu);

	kvm_save_gc0_index(cop0);
	kvm_save_gc0_entrylo0(cop0);
	kvm_save_gc0_entrylo1(cop0);
	kvm_save_gc0_context(cop0);
#ifdef CONFIG_64BIT
	kvm_save_gc0_xcontext(cop0);
#endif
	kvm_save_gc0_pagemask(cop0);
	kvm_save_gc0_pagegrain(cop0);
	kvm_save_gc0_wired(cop0);
	/* allow wired TLB entries to be overwritten */
	clear_gc0_wired(MIPSR6_WIRED_WIRED);
	kvm_save_gc0_hwrena(cop0);
	kvm_save_gc0_badvaddr(cop0);
	kvm_save_gc0_entryhi(cop0);
	kvm_save_gc0_status(cop0);
	kvm_save_gc0_intctl(cop0);
	kvm_save_gc0_epc(cop0);
	kvm_write_sw_gc0_ebase(cop0, kvm_vz_read_gc0_ebase());
	if (cpu_guest_has_userlocal)
		kvm_save_gc0_userlocal(cop0);

	/* only save implemented config registers */
	kvm_save_gc0_config(cop0);
	if (cpu_guest_has_conf1)
		kvm_save_gc0_config1(cop0);
	if (cpu_guest_has_conf2)
		kvm_save_gc0_config2(cop0);
	if (cpu_guest_has_conf3)
		kvm_save_gc0_config3(cop0);
	if (cpu_guest_has_conf4)
		kvm_save_gc0_config4(cop0);
	if (cpu_guest_has_conf5)
		kvm_save_gc0_config5(cop0);
	if (cpu_guest_has_conf6)
		kvm_save_gc0_config6(cop0);
	if (cpu_guest_has_conf7)
		kvm_save_gc0_config7(cop0);

	kvm_save_gc0_errorepc(cop0);

	/* save KScratch registers if enabled in guest */
	if (cpu_guest_has_conf4) {
		if (cpu_guest_has_kscr(2))
			kvm_save_gc0_kscratch1(cop0);
		if (cpu_guest_has_kscr(3))
			kvm_save_gc0_kscratch2(cop0);
		if (cpu_guest_has_kscr(4))
			kvm_save_gc0_kscratch3(cop0);
		if (cpu_guest_has_kscr(5))
			kvm_save_gc0_kscratch4(cop0);
		if (cpu_guest_has_kscr(6))
			kvm_save_gc0_kscratch5(cop0);
		if (cpu_guest_has_kscr(7))
			kvm_save_gc0_kscratch6(cop0);
	}

	kvm_vz_save_timer(vcpu);

	/* save Root.GuestCtl2 in unused Guest guestctl2 register */
	if (cpu_has_guestctl2)
		cop0->reg[MIPS_CP0_GUESTCTL2][MIPS_CP0_GUESTCTL2_SEL] =
			read_c0_guestctl2();

	return 0;
}
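
/*
 * Encoding example for kvm_vz_resize_guest_vtlb() below (illustrative):
 * requesting size = 576 entries writes MMUSize - 1 = 575 = 0x23f.  The low
 * MIPS_CONF1_TLBS_SIZE (6) bits, 0x3f, land in Config1.MMUSize-1 and the
 * remaining 0x8 lands in Config4.VTLBSizeExt (or MMUSizeExt, depending on
 * Config4.MMUExtDef).  Reading the fields back and recombining them yields
 * the size the hardware actually accepted, which may be smaller than
 * requested.
 */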
1967 */ 1968 static unsigned int kvm_vz_resize_guest_vtlb(unsigned int size) 1969 { 1970 unsigned int config4 = 0, ret = 0, limit; 1971 1972 /* Write MMUSize - 1 into guest Config registers */ 1973 if (cpu_guest_has_conf1) 1974 change_gc0_config1(MIPS_CONF1_TLBS, 1975 (size - 1) << MIPS_CONF1_TLBS_SHIFT); 1976 if (cpu_guest_has_conf4) { 1977 config4 = read_gc0_config4(); 1978 if (cpu_has_mips_r6 || (config4 & MIPS_CONF4_MMUEXTDEF) == 1979 MIPS_CONF4_MMUEXTDEF_VTLBSIZEEXT) { 1980 config4 &= ~MIPS_CONF4_VTLBSIZEEXT; 1981 config4 |= ((size - 1) >> MIPS_CONF1_TLBS_SIZE) << 1982 MIPS_CONF4_VTLBSIZEEXT_SHIFT; 1983 } else if ((config4 & MIPS_CONF4_MMUEXTDEF) == 1984 MIPS_CONF4_MMUEXTDEF_MMUSIZEEXT) { 1985 config4 &= ~MIPS_CONF4_MMUSIZEEXT; 1986 config4 |= ((size - 1) >> MIPS_CONF1_TLBS_SIZE) << 1987 MIPS_CONF4_MMUSIZEEXT_SHIFT; 1988 } 1989 write_gc0_config4(config4); 1990 } 1991 1992 /* 1993 * Set Guest.Wired.Limit = 0 (no limit up to Guest.MMUSize-1), unless it 1994 * would exceed Root.Wired.Limit (clearing Guest.Wired.Wired so write 1995 * not dropped) 1996 */ 1997 if (cpu_has_mips_r6) { 1998 limit = (read_c0_wired() & MIPSR6_WIRED_LIMIT) >> 1999 MIPSR6_WIRED_LIMIT_SHIFT; 2000 if (size - 1 <= limit) 2001 limit = 0; 2002 write_gc0_wired(limit << MIPSR6_WIRED_LIMIT_SHIFT); 2003 } 2004 2005 /* Read back MMUSize - 1 */ 2006 back_to_back_c0_hazard(); 2007 if (cpu_guest_has_conf1) 2008 ret = (read_gc0_config1() & MIPS_CONF1_TLBS) >> 2009 MIPS_CONF1_TLBS_SHIFT; 2010 if (config4) { 2011 if (cpu_has_mips_r6 || (config4 & MIPS_CONF4_MMUEXTDEF) == 2012 MIPS_CONF4_MMUEXTDEF_VTLBSIZEEXT) 2013 ret |= ((config4 & MIPS_CONF4_VTLBSIZEEXT) >> 2014 MIPS_CONF4_VTLBSIZEEXT_SHIFT) << 2015 MIPS_CONF1_TLBS_SIZE; 2016 else if ((config4 & MIPS_CONF4_MMUEXTDEF) == 2017 MIPS_CONF4_MMUEXTDEF_MMUSIZEEXT) 2018 ret |= ((config4 & MIPS_CONF4_MMUSIZEEXT) >> 2019 MIPS_CONF4_MMUSIZEEXT_SHIFT) << 2020 MIPS_CONF1_TLBS_SIZE; 2021 } 2022 return ret + 1; 2023 } 2024 2025 static int kvm_vz_hardware_enable(void) 2026 { 2027 unsigned int mmu_size, guest_mmu_size, ftlb_size; 2028 2029 /* 2030 * ImgTec cores tend to use a shared root/guest TLB. To avoid overlap of 2031 * root wired and guest entries, the guest TLB may need resizing. 2032 */ 2033 mmu_size = current_cpu_data.tlbsizevtlb; 2034 ftlb_size = current_cpu_data.tlbsize - mmu_size; 2035 2036 /* Try switching to maximum guest VTLB size for flush */ 2037 guest_mmu_size = kvm_vz_resize_guest_vtlb(mmu_size); 2038 current_cpu_data.guest.tlbsize = guest_mmu_size + ftlb_size; 2039 kvm_vz_local_flush_guesttlb_all(); 2040 2041 /* 2042 * Reduce to make space for root wired entries and at least 2 root 2043 * non-wired entries. This does assume that long-term wired entries 2044 * won't be added later. 2045 */ 2046 guest_mmu_size = mmu_size - num_wired_entries() - 2; 2047 guest_mmu_size = kvm_vz_resize_guest_vtlb(guest_mmu_size); 2048 current_cpu_data.guest.tlbsize = guest_mmu_size + ftlb_size; 2049 2050 /* 2051 * Write the VTLB size, but if another CPU has already written, check it 2052 * matches or we won't provide a consistent view to the guest. If this 2053 * ever happens it suggests an asymmetric number of wired entries. 2054 */ 2055 if (cmpxchg(&kvm_vz_guest_vtlb_size, 0, guest_mmu_size) && 2056 WARN(guest_mmu_size != kvm_vz_guest_vtlb_size, 2057 "Available guest VTLB size mismatch")) 2058 return -EINVAL; 2059 2060 /* 2061 * Enable virtualization features granting guest direct control of 2062 * certain features: 2063 * CP0=1: Guest coprocessor 0 context. 2064 * AT=Guest: Guest MMU. 
static int kvm_vz_hardware_enable(void)
{
	unsigned int mmu_size, guest_mmu_size, ftlb_size;

	/*
	 * ImgTec cores tend to use a shared root/guest TLB. To avoid overlap
	 * of root wired and guest entries, the guest TLB may need resizing.
	 */
	mmu_size = current_cpu_data.tlbsizevtlb;
	ftlb_size = current_cpu_data.tlbsize - mmu_size;

	/* Try switching to maximum guest VTLB size for flush */
	guest_mmu_size = kvm_vz_resize_guest_vtlb(mmu_size);
	current_cpu_data.guest.tlbsize = guest_mmu_size + ftlb_size;
	kvm_vz_local_flush_guesttlb_all();

	/*
	 * Reduce to make space for root wired entries and at least 2 root
	 * non-wired entries. This does assume that long-term wired entries
	 * won't be added later.
	 */
	guest_mmu_size = mmu_size - num_wired_entries() - 2;
	guest_mmu_size = kvm_vz_resize_guest_vtlb(guest_mmu_size);
	current_cpu_data.guest.tlbsize = guest_mmu_size + ftlb_size;

	/*
	 * Write the VTLB size, but if another CPU has already written, check
	 * it matches or we won't provide a consistent view to the guest. If
	 * this ever happens it suggests an asymmetric number of wired entries.
	 */
	if (cmpxchg(&kvm_vz_guest_vtlb_size, 0, guest_mmu_size) &&
	    WARN(guest_mmu_size != kvm_vz_guest_vtlb_size,
		 "Available guest VTLB size mismatch"))
		return -EINVAL;

	/*
	 * Enable virtualization features granting guest direct control of
	 * certain features:
	 *	CP0=1:		Guest coprocessor 0 context.
	 *	AT=Guest:	Guest MMU.
	 *	CG=1:		Hit (virtual address) CACHE operations (optional).
	 *	CF=1:		Guest Config registers.
	 *	CGI=1:		Indexed flush CACHE operations (optional).
	 */
	write_c0_guestctl0(MIPS_GCTL0_CP0 |
			   (MIPS_GCTL0_AT_GUEST << MIPS_GCTL0_AT_SHIFT) |
			   MIPS_GCTL0_CG | MIPS_GCTL0_CF);
	if (cpu_has_guestctl0ext)
		set_c0_guestctl0ext(MIPS_GCTL0EXT_CGI);

	if (cpu_has_guestid) {
		write_c0_guestctl1(0);
		kvm_vz_local_flush_roottlb_all_guests();

		GUESTID_MASK = current_cpu_data.guestid_mask;
		GUESTID_FIRST_VERSION = GUESTID_MASK + 1;
		GUESTID_VERSION_MASK = ~GUESTID_MASK;

		current_cpu_data.guestid_cache = GUESTID_FIRST_VERSION;
	}

	/* clear any pending injected virtual guest interrupts */
	if (cpu_has_guestctl2)
		clear_c0_guestctl2(0x3f << 10);

	return 0;
}

static void kvm_vz_hardware_disable(void)
{
	kvm_vz_local_flush_guesttlb_all();

	if (cpu_has_guestid) {
		write_c0_guestctl1(0);
		kvm_vz_local_flush_roottlb_all_guests();
	}
}

static int kvm_vz_check_extension(struct kvm *kvm, long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_MIPS_VZ:
		/* we wouldn't be here unless cpu_has_vz */
		r = 1;
		break;
#ifdef CONFIG_64BIT
	case KVM_CAP_MIPS_64BIT:
		/* We support 64-bit registers/operations and addresses */
		r = 2;
		break;
#endif
	default:
		r = 0;
		break;
	}

	return r;
}

static int kvm_vz_vcpu_init(struct kvm_vcpu *vcpu)
{
	int i;

	for_each_possible_cpu(i)
		vcpu->arch.vzguestid[i] = 0;

	return 0;
}

static void kvm_vz_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	int cpu;

	/*
	 * If the VCPU is freed and reused as another VCPU, we don't want the
	 * matching pointer wrongly hanging around in last_vcpu[] or
	 * last_exec_vcpu[].
	 */
	for_each_possible_cpu(cpu) {
		if (last_vcpu[cpu] == vcpu)
			last_vcpu[cpu] = NULL;
		if (last_exec_vcpu[cpu] == vcpu)
			last_exec_vcpu[cpu] = NULL;
	}
}
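/*
 * Timer frequency example for kvm_vz_vcpu_setup() below (illustrative
 * figures): with mips_hpt_frequency = 200000000 the guest Count runs at
 * 200 MHz; on a host whose timer exceeds NSEC_PER_SEC (1 GHz), e.g. 1.2 GHz,
 * the soft timer cannot keep up, so the guest keeps the 100 MHz default.
 */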
static int kvm_vz_vcpu_setup(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	unsigned long count_hz = 100*1000*1000; /* default to 100 MHz */

	/*
	 * Start off the timer at the same frequency as the host timer, but the
	 * soft timer doesn't handle frequencies greater than 1GHz yet.
	 */
	if (mips_hpt_frequency && mips_hpt_frequency <= NSEC_PER_SEC)
		count_hz = mips_hpt_frequency;
	kvm_mips_init_count(vcpu, count_hz);

	/*
	 * Initialize guest register state to valid architectural reset state.
	 */

	/* PageGrain */
	if (cpu_has_mips_r6)
		kvm_write_sw_gc0_pagegrain(cop0, PG_RIE | PG_XIE | PG_IEC);
	/* Wired */
	if (cpu_has_mips_r6)
		kvm_write_sw_gc0_wired(cop0,
				       read_gc0_wired() & MIPSR6_WIRED_LIMIT);
	/* Status */
	kvm_write_sw_gc0_status(cop0, ST0_BEV | ST0_ERL);
	if (cpu_has_mips_r6)
		kvm_change_sw_gc0_status(cop0, ST0_FR, read_gc0_status());
	/* IntCtl */
	kvm_write_sw_gc0_intctl(cop0, read_gc0_intctl() &
				(INTCTLF_IPFDC | INTCTLF_IPPCI | INTCTLF_IPTI));
	/* PRId */
	kvm_write_sw_gc0_prid(cop0, boot_cpu_data.processor_id);
	/* EBase */
	kvm_write_sw_gc0_ebase(cop0, (s32)0x80000000 | vcpu->vcpu_id);

	/* Config */
	kvm_save_gc0_config(cop0);
	/* architecturally writable (e.g. from guest) */
	kvm_change_sw_gc0_config(cop0, CONF_CM_CMASK,
				 _page_cachable_default >> _CACHE_SHIFT);
	/* architecturally read only, but maybe writable from root */
	kvm_change_sw_gc0_config(cop0, MIPS_CONF_MT, read_c0_config());
	if (cpu_guest_has_conf1) {
		kvm_set_sw_gc0_config(cop0, MIPS_CONF_M);
		/* Config1 */
		kvm_save_gc0_config1(cop0);
		/* architecturally read only, but maybe writable from root */
		kvm_clear_sw_gc0_config1(cop0, MIPS_CONF1_C2 |
					       MIPS_CONF1_MD |
					       MIPS_CONF1_PC |
					       MIPS_CONF1_WR |
					       MIPS_CONF1_CA |
					       MIPS_CONF1_FP);
	}
	if (cpu_guest_has_conf2) {
		kvm_set_sw_gc0_config1(cop0, MIPS_CONF_M);
		/* Config2 */
		kvm_save_gc0_config2(cop0);
	}
	if (cpu_guest_has_conf3) {
		kvm_set_sw_gc0_config2(cop0, MIPS_CONF_M);
		/* Config3 */
		kvm_save_gc0_config3(cop0);
		/* architecturally writable (e.g. from guest) */
		kvm_clear_sw_gc0_config3(cop0, MIPS_CONF3_ISA_OE);
		/* architecturally read only, but maybe writable from root */
		kvm_clear_sw_gc0_config3(cop0, MIPS_CONF3_MSA |
					       MIPS_CONF3_BPG |
					       MIPS_CONF3_ULRI |
					       MIPS_CONF3_DSP |
					       MIPS_CONF3_CTXTC |
					       MIPS_CONF3_ITL |
					       MIPS_CONF3_LPA |
					       MIPS_CONF3_VEIC |
					       MIPS_CONF3_VINT |
					       MIPS_CONF3_SP |
					       MIPS_CONF3_CDMM |
					       MIPS_CONF3_MT |
					       MIPS_CONF3_SM |
					       MIPS_CONF3_TL);
	}
	if (cpu_guest_has_conf4) {
		kvm_set_sw_gc0_config3(cop0, MIPS_CONF_M);
		/* Config4 */
		kvm_save_gc0_config4(cop0);
	}
	if (cpu_guest_has_conf5) {
		kvm_set_sw_gc0_config4(cop0, MIPS_CONF_M);
		/* Config5 */
		kvm_save_gc0_config5(cop0);
		/* architecturally writable (e.g. from guest) */
		kvm_clear_sw_gc0_config5(cop0, MIPS_CONF5_K |
					       MIPS_CONF5_CV |
					       MIPS_CONF5_MSAEN |
					       MIPS_CONF5_UFE |
					       MIPS_CONF5_FRE |
					       MIPS_CONF5_SBRI |
					       MIPS_CONF5_UFR);
		/* architecturally read only, but maybe writable from root */
		kvm_clear_sw_gc0_config5(cop0, MIPS_CONF5_MRP);
	}

	/* start with no pending virtual guest interrupts */
	if (cpu_has_guestctl2)
		cop0->reg[MIPS_CP0_GUESTCTL2][MIPS_CP0_GUESTCTL2_SEL] = 0;

	/* Put PC at reset vector */
	vcpu->arch.pc = CKSEG1ADDR(0x1fc00000);

	return 0;
}

static void kvm_vz_flush_shadow_all(struct kvm *kvm)
{
	if (cpu_has_guestid) {
		/* Flush GuestID for each VCPU individually */
		kvm_flush_remote_tlbs(kvm);
	} else {
		/*
		 * For each CPU there is a single GPA ASID used by all VCPUs in
		 * the VM, so it doesn't make sense for the VCPUs to handle
		 * invalidation of these ASIDs individually.
		 *
		 * Instead mark all CPUs as needing ASID invalidation in
		 * asid_flush_mask, and just use kvm_flush_remote_tlbs(kvm) to
		 * kick any running VCPUs so they check asid_flush_mask.
		 */
		cpumask_setall(&kvm->arch.asid_flush_mask);
		kvm_flush_remote_tlbs(kvm);
	}
}
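/*
 * Either way the heavy lifting above is done by kvm_flush_remote_tlbs():
 * with GuestIDs, each kicked VCPU picks up a fresh GuestID before its next
 * guest entry; without GuestIDs, the asid_flush_mask set above makes each
 * CPU invalidate its shared GPA ASID before the next entry.
 */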
static void kvm_vz_flush_shadow_memslot(struct kvm *kvm,
					const struct kvm_memory_slot *slot)
{
	kvm_vz_flush_shadow_all(kvm);
}

static void kvm_vz_vcpu_reenter(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	int cpu = smp_processor_id();
	int preserve_guest_tlb;

	preserve_guest_tlb = kvm_vz_check_requests(vcpu, cpu);

	if (preserve_guest_tlb)
		kvm_vz_vcpu_save_wired(vcpu);

	kvm_vz_vcpu_load_tlb(vcpu, cpu);

	if (preserve_guest_tlb)
		kvm_vz_vcpu_load_wired(vcpu);
}

static int kvm_vz_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	int cpu = smp_processor_id();
	int r;

	/* Check if we have any exceptions/interrupts pending */
	kvm_mips_deliver_interrupts(vcpu, read_gc0_cause());

	kvm_vz_check_requests(vcpu, cpu);
	kvm_vz_vcpu_load_tlb(vcpu, cpu);
	kvm_vz_vcpu_load_wired(vcpu);

	r = vcpu->arch.vcpu_run(run, vcpu);

	kvm_vz_vcpu_save_wired(vcpu);

	return r;
}

static struct kvm_mips_callbacks kvm_vz_callbacks = {
	.handle_cop_unusable = kvm_trap_vz_handle_cop_unusable,
	.handle_tlb_mod = kvm_trap_vz_handle_tlb_st_miss,
	.handle_tlb_ld_miss = kvm_trap_vz_handle_tlb_ld_miss,
	.handle_tlb_st_miss = kvm_trap_vz_handle_tlb_st_miss,
	.handle_addr_err_st = kvm_trap_vz_no_handler,
	.handle_addr_err_ld = kvm_trap_vz_no_handler,
	.handle_syscall = kvm_trap_vz_no_handler,
	.handle_res_inst = kvm_trap_vz_no_handler,
	.handle_break = kvm_trap_vz_no_handler,
	.handle_msa_disabled = kvm_trap_vz_handle_msa_disabled,
	.handle_guest_exit = kvm_trap_vz_handle_guest_exit,

	.hardware_enable = kvm_vz_hardware_enable,
	.hardware_disable = kvm_vz_hardware_disable,
	.check_extension = kvm_vz_check_extension,
	.vcpu_init = kvm_vz_vcpu_init,
	.vcpu_uninit = kvm_vz_vcpu_uninit,
	.vcpu_setup = kvm_vz_vcpu_setup,
	.flush_shadow_all = kvm_vz_flush_shadow_all,
	.flush_shadow_memslot = kvm_vz_flush_shadow_memslot,
	.gva_to_gpa = kvm_vz_gva_to_gpa_cb,
	.queue_timer_int = kvm_vz_queue_timer_int_cb,
	.dequeue_timer_int = kvm_vz_dequeue_timer_int_cb,
	.queue_io_int = kvm_vz_queue_io_int_cb,
	.dequeue_io_int = kvm_vz_dequeue_io_int_cb,
	.irq_deliver = kvm_vz_irq_deliver_cb,
	.irq_clear = kvm_vz_irq_clear_cb,
	.num_regs = kvm_vz_num_regs,
	.copy_reg_indices = kvm_vz_copy_reg_indices,
	.get_one_reg = kvm_vz_get_one_reg,
	.set_one_reg = kvm_vz_set_one_reg,
	.vcpu_load = kvm_vz_vcpu_load,
	.vcpu_put = kvm_vz_vcpu_put,
	.vcpu_run = kvm_vz_vcpu_run,
	.vcpu_reenter = kvm_vz_vcpu_reenter,
};

int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks)
{
	if (!cpu_has_vz)
		return -ENODEV;

	/*
	 * VZ requires at least 2 KScratch registers, so it should have been
	 * possible to allocate pgd_reg.
	 */
	if (WARN(pgd_reg == -1,
		 "pgd_reg not allocated even though cpu_has_vz\n"))
		return -ENODEV;

	pr_info("Starting KVM with MIPS VZ extensions\n");

	*install_callbacks = &kvm_vz_callbacks;
	return 0;
}
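/*
 * Typical use (sketch, assuming the generic MIPS KVM init path): the
 * architecture init code passes the address of its callback pointer, e.g.
 *
 *	ret = kvm_mips_emulation_init(&kvm_mips_callbacks);
 *
 * after which guest exits and VCPU lifecycle hooks are dispatched through
 * kvm_vz_callbacks above.
 */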