/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS: Support for hardware virtualization extensions
 *
 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
 * Authors: Yann Le Du <ledu@kymasys.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/preempt.h>
#include <linux/vmalloc.h>
#include <asm/cacheflush.h>
#include <asm/cacheops.h>
#include <asm/cmpxchg.h>
#include <asm/fpu.h>
#include <asm/hazards.h>
#include <asm/inst.h>
#include <asm/mmu_context.h>
#include <asm/r4kcache.h>
#include <asm/time.h>
#include <asm/tlb.h>
#include <asm/tlbex.h>

#include <linux/kvm_host.h>

#include "interrupt.h"

#include "trace.h"

/* Pointers to last VCPU loaded on each physical CPU */
static struct kvm_vcpu *last_vcpu[NR_CPUS];
/* Pointers to last VCPU executed on each physical CPU */
static struct kvm_vcpu *last_exec_vcpu[NR_CPUS];

/*
 * Number of guest VTLB entries to use, so we can catch inconsistency between
 * CPUs.
 */
static unsigned int kvm_vz_guest_vtlb_size;

static inline long kvm_vz_read_gc0_ebase(void)
{
	if (sizeof(long) == 8 && cpu_has_ebase_wg)
		return read_gc0_ebase_64();
	else
		return read_gc0_ebase();
}

static inline void kvm_vz_write_gc0_ebase(long v)
{
	/*
	 * First write with WG=1 to write upper bits, then write again in case
	 * WG should be left at 0.
	 * write_gc0_ebase_64() is no longer UNDEFINED since R6.
	 */
	if (sizeof(long) == 8 &&
	    (cpu_has_mips64r6 || cpu_has_ebase_wg)) {
		write_gc0_ebase_64(v | MIPS_EBASE_WG);
		write_gc0_ebase_64(v);
	} else {
		write_gc0_ebase(v | MIPS_EBASE_WG);
		write_gc0_ebase(v);
	}
}

/*
 * These Config bits may be writable by the guest:
 * Config:	[K23, KU] (!TLB), K0
 * Config1:	(none)
 * Config2:	[TU, SU] (impl)
 * Config3:	ISAOnExc
 * Config4:	FTLBPageSize
 * Config5:	K, CV, MSAEn, UFE, FRE, SBRI, UFR
 */

static inline unsigned int kvm_vz_config_guest_wrmask(struct kvm_vcpu *vcpu)
{
	return CONF_CM_CMASK;
}

static inline unsigned int kvm_vz_config1_guest_wrmask(struct kvm_vcpu *vcpu)
{
	return 0;
}

static inline unsigned int kvm_vz_config2_guest_wrmask(struct kvm_vcpu *vcpu)
{
	return 0;
}

static inline unsigned int kvm_vz_config3_guest_wrmask(struct kvm_vcpu *vcpu)
{
	return MIPS_CONF3_ISA_OE;
}

static inline unsigned int kvm_vz_config4_guest_wrmask(struct kvm_vcpu *vcpu)
{
	/* no need to be exact */
	return MIPS_CONF4_VFTLBPAGESIZE;
}

static inline unsigned int kvm_vz_config5_guest_wrmask(struct kvm_vcpu *vcpu)
{
	unsigned int mask = MIPS_CONF5_K | MIPS_CONF5_CV | MIPS_CONF5_SBRI;

	/* Permit MSAEn changes if MSA supported and enabled */
	if (kvm_mips_guest_has_msa(&vcpu->arch))
		mask |= MIPS_CONF5_MSAEN;

	/*
	 * Permit guest FPU mode changes if FPU is enabled and the relevant
	 * feature exists according to FIR register.
	 */
	if (kvm_mips_guest_has_fpu(&vcpu->arch)) {
		if (cpu_has_ufr)
			mask |= MIPS_CONF5_UFR;
		if (cpu_has_fre)
			mask |= MIPS_CONF5_FRE | MIPS_CONF5_UFE;
	}

	return mask;
}

/*
 * VZ optionally allows these additional Config bits to be written by root:
 * Config:	M, [MT]
 * Config1:	M, [MMUSize-1, C2, MD, PC, WR, CA], FP
 * Config2:	M
 * Config3:	M, MSAP, [BPG], ULRI, [DSP2P, DSPP, CTXTC, ITL, LPA, VEIC,
 *		VInt, SP, CDMM, MT, SM, TL]
 * Config4:	M, [VTLBSizeExt, MMUSizeExt]
 * Config5:	[MRP]
 */

static inline unsigned int kvm_vz_config_user_wrmask(struct kvm_vcpu *vcpu)
{
	return kvm_vz_config_guest_wrmask(vcpu) | MIPS_CONF_M;
}

static inline unsigned int kvm_vz_config1_user_wrmask(struct kvm_vcpu *vcpu)
{
	unsigned int mask = kvm_vz_config1_guest_wrmask(vcpu) | MIPS_CONF_M;

	/* Permit FPU to be present if FPU is supported */
	if (kvm_mips_guest_can_have_fpu(&vcpu->arch))
		mask |= MIPS_CONF1_FP;

	return mask;
}

static inline unsigned int kvm_vz_config2_user_wrmask(struct kvm_vcpu *vcpu)
{
	return kvm_vz_config2_guest_wrmask(vcpu) | MIPS_CONF_M;
}

static inline unsigned int kvm_vz_config3_user_wrmask(struct kvm_vcpu *vcpu)
{
	unsigned int mask = kvm_vz_config3_guest_wrmask(vcpu) | MIPS_CONF_M |
		MIPS_CONF3_ULRI;

	/* Permit MSA to be present if MSA is supported */
	if (kvm_mips_guest_can_have_msa(&vcpu->arch))
		mask |= MIPS_CONF3_MSA;

	return mask;
}

static inline unsigned int kvm_vz_config4_user_wrmask(struct kvm_vcpu *vcpu)
{
	return kvm_vz_config4_guest_wrmask(vcpu) | MIPS_CONF_M;
}

static inline unsigned int kvm_vz_config5_user_wrmask(struct kvm_vcpu *vcpu)
{
	return kvm_vz_config5_guest_wrmask(vcpu);
}

static gpa_t kvm_vz_gva_to_gpa_cb(gva_t gva)
{
	/* VZ guest has already converted gva to gpa */
	return gva;
}

static void kvm_vz_queue_irq(struct kvm_vcpu *vcpu, unsigned int priority)
{
	set_bit(priority, &vcpu->arch.pending_exceptions);
	clear_bit(priority, &vcpu->arch.pending_exceptions_clr);
}

static void kvm_vz_dequeue_irq(struct kvm_vcpu *vcpu, unsigned int priority)
{
	clear_bit(priority, &vcpu->arch.pending_exceptions);
	set_bit(priority, &vcpu->arch.pending_exceptions_clr);
}

static void kvm_vz_queue_timer_int_cb(struct kvm_vcpu *vcpu)
{
	/*
	 * timer expiry is asynchronous to vcpu execution therefore defer guest
	 * cp0 accesses
	 */
	kvm_vz_queue_irq(vcpu, MIPS_EXC_INT_TIMER);
}

static void kvm_vz_dequeue_timer_int_cb(struct kvm_vcpu *vcpu)
{
	/*
	 * timer expiry is asynchronous to vcpu execution therefore defer guest
	 * cp0 accesses
	 */
	kvm_vz_dequeue_irq(vcpu, MIPS_EXC_INT_TIMER);
}

static void kvm_vz_queue_io_int_cb(struct kvm_vcpu *vcpu,
				   struct kvm_mips_interrupt *irq)
{
	int intr = (int)irq->irq;

	/*
	 * interrupts are asynchronous to vcpu execution therefore defer guest
	 * cp0 accesses
	 */
	switch (intr) {
	case 2:
		kvm_vz_queue_irq(vcpu, MIPS_EXC_INT_IO);
		break;

	case 3:
		kvm_vz_queue_irq(vcpu, MIPS_EXC_INT_IPI_1);
		break;

	case 4:
		kvm_vz_queue_irq(vcpu, MIPS_EXC_INT_IPI_2);
		break;

	default:
		break;
	}
}
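/*
 * Editorial note (not from the original source): irq numbers 2/3/4 above
 * map via kvm_vz_priority_to_irq[] below onto guest Cause.IP2/IP3/IP4
 * (hardware interrupt lines 0/1/2); the dequeue callback that follows
 * receives the same numbers negated (-2/-3/-4) to request clearing.
 */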
static void kvm_vz_dequeue_io_int_cb(struct kvm_vcpu *vcpu,
				     struct kvm_mips_interrupt *irq)
{
	int intr = (int)irq->irq;

	/*
	 * interrupts are asynchronous to vcpu execution therefore defer guest
	 * cp0 accesses
	 */
	switch (intr) {
	case -2:
		kvm_vz_dequeue_irq(vcpu, MIPS_EXC_INT_IO);
		break;

	case -3:
		kvm_vz_dequeue_irq(vcpu, MIPS_EXC_INT_IPI_1);
		break;

	case -4:
		kvm_vz_dequeue_irq(vcpu, MIPS_EXC_INT_IPI_2);
		break;

	default:
		break;
	}
}

static u32 kvm_vz_priority_to_irq[MIPS_EXC_MAX] = {
	[MIPS_EXC_INT_TIMER] = C_IRQ5,
	[MIPS_EXC_INT_IO]    = C_IRQ0,
	[MIPS_EXC_INT_IPI_1] = C_IRQ1,
	[MIPS_EXC_INT_IPI_2] = C_IRQ2,
};

static int kvm_vz_irq_deliver_cb(struct kvm_vcpu *vcpu, unsigned int priority,
				 u32 cause)
{
	u32 irq = (priority < MIPS_EXC_MAX) ?
		kvm_vz_priority_to_irq[priority] : 0;

	switch (priority) {
	case MIPS_EXC_INT_TIMER:
		set_gc0_cause(C_TI);
		break;

	case MIPS_EXC_INT_IO:
	case MIPS_EXC_INT_IPI_1:
	case MIPS_EXC_INT_IPI_2:
		if (cpu_has_guestctl2)
			set_c0_guestctl2(irq);
		else
			set_gc0_cause(irq);
		break;

	default:
		break;
	}

	clear_bit(priority, &vcpu->arch.pending_exceptions);
	return 1;
}

static int kvm_vz_irq_clear_cb(struct kvm_vcpu *vcpu, unsigned int priority,
			       u32 cause)
{
	u32 irq = (priority < MIPS_EXC_MAX) ?
		kvm_vz_priority_to_irq[priority] : 0;

	switch (priority) {
	case MIPS_EXC_INT_TIMER:
		/*
		 * Call to kvm_write_c0_guest_compare() clears Cause.TI in
		 * kvm_mips_emulate_CP0(). Explicitly clear irq associated with
		 * Cause.IP[IPTI] if GuestCtl2 virtual interrupt register not
		 * supported or if not using GuestCtl2 Hardware Clear.
		 */
		if (cpu_has_guestctl2) {
			if (!(read_c0_guestctl2() & (irq << 14)))
				clear_c0_guestctl2(irq);
		} else {
			clear_gc0_cause(irq);
		}
		break;

	case MIPS_EXC_INT_IO:
	case MIPS_EXC_INT_IPI_1:
	case MIPS_EXC_INT_IPI_2:
		/* Clear GuestCtl2.VIP irq if not using Hardware Clear */
		if (cpu_has_guestctl2) {
			if (!(read_c0_guestctl2() & (irq << 14)))
				clear_c0_guestctl2(irq);
		} else {
			clear_gc0_cause(irq);
		}
		break;

	default:
		break;
	}

	clear_bit(priority, &vcpu->arch.pending_exceptions_clr);
	return 1;
}

/*
 * VZ guest timer handling.
 */

/**
 * _kvm_vz_restore_stimer() - Restore soft timer state.
 * @vcpu:	Virtual CPU.
 * @compare:	CP0_Compare register value, restored by caller.
 * @cause:	CP0_Cause register to restore.
 *
 * Restore VZ state relating to the soft timer.
 */
static void _kvm_vz_restore_stimer(struct kvm_vcpu *vcpu, u32 compare,
				   u32 cause)
{
	/*
	 * Avoid spurious counter interrupts by setting Guest CP0_Count to just
	 * after Guest CP0_Compare.
	 */
	write_c0_gtoffset(compare - read_c0_count());

	back_to_back_c0_hazard();
	write_gc0_cause(cause);
}
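/*
 * Illustrative arithmetic (not from the original source): guest CP0_Count
 * reads as Root.Count + GTOffset, so with e.g. Root.Count = 0x4000 and
 * guest Compare = 0x1000, the write above sets GTOffset = 0x1000 - 0x4000
 * and the guest sees Count == Compare, i.e. just past expiry, with no new
 * interrupt until Count wraps all the way around.
 */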
/**
 * kvm_vz_restore_timer() - Restore guest timer state.
 * @vcpu:	Virtual CPU.
 *
 * Restore soft timer state from saved context.
 */
static void kvm_vz_restore_timer(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	u32 cause, compare;

	compare = kvm_read_sw_gc0_compare(cop0);
	cause = kvm_read_sw_gc0_cause(cop0);

	write_gc0_compare(compare);
	_kvm_vz_restore_stimer(vcpu, compare, cause);
}

/**
 * kvm_vz_save_timer() - Save guest timer state.
 * @vcpu:	Virtual CPU.
 *
 * Save VZ guest timer state.
 */
static void kvm_vz_save_timer(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	u32 compare, cause;

	compare = read_gc0_compare();
	cause = read_gc0_cause();

	/* save timer-related state to VCPU context */
	kvm_write_sw_gc0_cause(cop0, cause);
	kvm_write_sw_gc0_compare(cop0, compare);
}

/**
 * kvm_vz_gva_to_gpa() - Convert valid GVA to GPA.
 * @vcpu:	KVM VCPU state.
 * @gva:	Guest virtual address to convert.
 * @gpa:	Output guest physical address.
 *
 * Convert a guest virtual address (GVA) which is valid according to the guest
 * context, to a guest physical address (GPA).
 *
 * Returns:	0 on success.
 *		-errno on failure.
 */
static int kvm_vz_gva_to_gpa(struct kvm_vcpu *vcpu, unsigned long gva,
			     unsigned long *gpa)
{
	u32 gva32 = gva;

	if ((long)gva == (s32)gva32) {
		/* Handle canonical 32-bit virtual address */
		if ((s32)gva32 < (s32)0xc0000000) {
			/* legacy unmapped KSeg0 or KSeg1 */
			*gpa = gva32 & 0x1fffffff;
			return 0;
		}
#ifdef CONFIG_64BIT
	} else if ((gva & 0xc000000000000000) == 0x8000000000000000) {
		/* XKPHYS */
		/*
		 * Traditionally fully unmapped.
		 * Bits 61:59 specify the CCA, which we can just mask off here.
		 * Bits 58:PABITS should be zero, but we shouldn't have got here
		 * if it wasn't.
		 */
		*gpa = gva & 0x07ffffffffffffff;
		return 0;
#endif
	}

	return kvm_vz_guest_tlb_lookup(vcpu, gva, gpa);
}
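/*
 * Worked examples (illustrative, not from the original source): KSeg0 GVA
 * 0x80001000 masks to GPA 0x00001000; on 64-bit, XKPHYS GVA
 * 0x9800000000001000 (CCA in bits 61:59) also masks to GPA 0x00001000.
 * Anything else falls through to the guest TLB lookup above.
 */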
/**
 * kvm_vz_badvaddr_to_gpa() - Convert GVA BadVAddr from root exception to GPA.
 * @vcpu:	KVM VCPU state.
 * @badvaddr:	Root BadVAddr.
 * @gpa:	Output guest physical address.
 *
 * VZ implementations are permitted to report guest virtual addresses (GVA) in
 * BadVAddr on a root exception during guest execution, instead of the more
 * convenient guest physical addresses (GPA). When we get a GVA, this function
 * converts it to a GPA, taking into account guest segmentation and guest TLB
 * state.
 *
 * Returns:	0 on success.
 *		-errno on failure.
 */
static int kvm_vz_badvaddr_to_gpa(struct kvm_vcpu *vcpu, unsigned long badvaddr,
				  unsigned long *gpa)
{
	unsigned int gexccode = (vcpu->arch.host_cp0_guestctl0 &
				 MIPS_GCTL0_GEXC) >> MIPS_GCTL0_GEXC_SHIFT;

	/* If BadVAddr is GPA, then all is well in the world */
	if (likely(gexccode == MIPS_GCTL0_GEXC_GPA)) {
		*gpa = badvaddr;
		return 0;
	}

	/* Otherwise we'd expect it to be GVA ... */
	if (WARN(gexccode != MIPS_GCTL0_GEXC_GVA,
		 "Unexpected gexccode %#x\n", gexccode))
		return -EINVAL;

	/* ... and we need to perform the GVA->GPA translation in software */
	return kvm_vz_gva_to_gpa(vcpu, badvaddr, gpa);
}

static int kvm_trap_vz_no_handler(struct kvm_vcpu *vcpu)
{
	u32 *opc = (u32 *) vcpu->arch.pc;
	u32 cause = vcpu->arch.host_cp0_cause;
	u32 exccode = (cause & CAUSEF_EXCCODE) >> CAUSEB_EXCCODE;
	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
	u32 inst = 0;

	/*
	 * Fetch the instruction.
	 */
	if (cause & CAUSEF_BD)
		opc += 1;
	kvm_get_badinstr(opc, vcpu, &inst);

	kvm_err("Exception Code: %d not handled @ PC: %p, inst: 0x%08x BadVaddr: %#lx Status: %#x\n",
		exccode, opc, inst, badvaddr,
		read_gc0_status());
	kvm_arch_vcpu_dump_regs(vcpu);
	vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
	return RESUME_HOST;
}
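/*
 * Editorial note (not from the original source): the next two helpers are
 * the emulation legs for GPSI (Guest Privileged Sensitive Instruction)
 * exits, taken when the guest executes a privileged operation (CP0 access,
 * WAIT, CACHE) that the hardware cannot complete in guest context, so root
 * finishes it on the guest's behalf.
 */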
static enum emulation_result kvm_vz_gpsi_cop0(union mips_instruction inst,
					      u32 *opc, u32 cause,
					      struct kvm_run *run,
					      struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	enum emulation_result er = EMULATE_DONE;
	u32 rt, rd, sel;
	unsigned long curr_pc;
	unsigned long val;

	/*
	 * Update PC and hold onto current PC in case there is
	 * an error and we want to rollback the PC
	 */
	curr_pc = vcpu->arch.pc;
	er = update_pc(vcpu, cause);
	if (er == EMULATE_FAIL)
		return er;

	if (inst.co_format.co) {
		switch (inst.co_format.func) {
		case wait_op:
			er = kvm_mips_emul_wait(vcpu);
			break;
		default:
			er = EMULATE_FAIL;
		}
	} else {
		rt = inst.c0r_format.rt;
		rd = inst.c0r_format.rd;
		sel = inst.c0r_format.sel;

		switch (inst.c0r_format.rs) {
		case dmfc_op:
		case mfc_op:
#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
			cop0->stat[rd][sel]++;
#endif
			if (rd == MIPS_CP0_COUNT &&
			    sel == 0) {			/* Count */
				val = kvm_mips_read_count(vcpu);
			} else if (rd == MIPS_CP0_COMPARE &&
				   sel == 0) {		/* Compare */
				val = read_gc0_compare();
			} else if ((rd == MIPS_CP0_PRID &&
				    (sel == 0 ||	/* PRid */
				     sel == 2 ||	/* CDMMBase */
				     sel == 3)) ||	/* CMGCRBase */
				   (rd == MIPS_CP0_STATUS &&
				    (sel == 2 ||	/* SRSCtl */
				     sel == 3)) ||	/* SRSMap */
				   (rd == MIPS_CP0_CONFIG &&
				    (sel == 7)) ||	/* Config7 */
				   (rd == MIPS_CP0_ERRCTL &&
				    (sel == 0))) {	/* ErrCtl */
				val = cop0->reg[rd][sel];
			} else {
				val = 0;
				er = EMULATE_FAIL;
			}

			if (er != EMULATE_FAIL) {
				/* Sign extend */
				if (inst.c0r_format.rs == mfc_op)
					val = (int)val;
				vcpu->arch.gprs[rt] = val;
			}

			trace_kvm_hwr(vcpu, (inst.c0r_format.rs == mfc_op) ?
					KVM_TRACE_MFC0 : KVM_TRACE_DMFC0,
				      KVM_TRACE_COP0(rd, sel), val);
			break;

		case dmtc_op:
		case mtc_op:
#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
			cop0->stat[rd][sel]++;
#endif
			val = vcpu->arch.gprs[rt];
			trace_kvm_hwr(vcpu, (inst.c0r_format.rs == mtc_op) ?
					KVM_TRACE_MTC0 : KVM_TRACE_DMTC0,
				      KVM_TRACE_COP0(rd, sel), val);

			if (rd == MIPS_CP0_COUNT &&
			    sel == 0) {			/* Count */
				kvm_mips_write_count(vcpu, vcpu->arch.gprs[rt]);
			} else if (rd == MIPS_CP0_COMPARE &&
				   sel == 0) {		/* Compare */
				kvm_mips_write_compare(vcpu,
						       vcpu->arch.gprs[rt],
						       true);
			} else if (rd == MIPS_CP0_ERRCTL &&
				   (sel == 0)) {	/* ErrCtl */
				/* ignore the written value */
			} else {
				er = EMULATE_FAIL;
			}
			break;

		default:
			er = EMULATE_FAIL;
			break;
		}
	}

	/* Rollback PC only if emulation was unsuccessful */
	if (er == EMULATE_FAIL) {
		kvm_err("[%#lx]%s: unsupported cop0 instruction 0x%08x\n",
			curr_pc, __func__, inst.word);

		vcpu->arch.pc = curr_pc;
	}

	return er;
}

static enum emulation_result kvm_vz_gpsi_cache(union mips_instruction inst,
					       u32 *opc, u32 cause,
					       struct kvm_run *run,
					       struct kvm_vcpu *vcpu)
{
	enum emulation_result er = EMULATE_DONE;
	u32 cache, op_inst, op, base;
	s16 offset;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	unsigned long va, curr_pc;

	/*
	 * Update PC and hold onto current PC in case there is
	 * an error and we want to rollback the PC
	 */
	curr_pc = vcpu->arch.pc;
	er = update_pc(vcpu, cause);
	if (er == EMULATE_FAIL)
		return er;

	base = inst.i_format.rs;
	op_inst = inst.i_format.rt;
	if (cpu_has_mips_r6)
		offset = inst.spec3_format.simmediate;
	else
		offset = inst.i_format.simmediate;
	cache = op_inst & CacheOp_Cache;
	op = op_inst & CacheOp_Op;

	va = arch->gprs[base] + offset;

	kvm_debug("CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
		  cache, op, base, arch->gprs[base], offset);

	/* Secondary or tertiary cache ops ignored */
	if (cache != Cache_I && cache != Cache_D)
		return EMULATE_DONE;

	switch (op_inst) {
	case Index_Invalidate_I:
		flush_icache_line_indexed(va);
		return EMULATE_DONE;
	case Index_Writeback_Inv_D:
		flush_dcache_line_indexed(va);
		return EMULATE_DONE;
	default:
		break;
	}

	kvm_err("@ %#lx/%#lx CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
		curr_pc, vcpu->arch.gprs[31], cache, op, base, arch->gprs[base],
		offset);
	/* Rollback PC */
	vcpu->arch.pc = curr_pc;

	return EMULATE_FAIL;
}
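/*
 * Illustrative decode (not from the original source): for "cache 0x01,
 * 0(a0)" the rt field is 0x01, so cache = op_inst & CacheOp_Cache = Cache_D
 * and op_inst matches Index_Writeback_Inv_D, which is emulated above with
 * flush_dcache_line_indexed().
 */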
static enum emulation_result kvm_trap_vz_handle_gpsi(u32 cause, u32 *opc,
						     struct kvm_vcpu *vcpu)
{
	enum emulation_result er = EMULATE_DONE;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	struct kvm_run *run = vcpu->run;
	union mips_instruction inst;
	int rd, rt, sel;
	int err;

	/*
	 * Fetch the instruction.
	 */
	if (cause & CAUSEF_BD)
		opc += 1;
	err = kvm_get_badinstr(opc, vcpu, &inst.word);
	if (err)
		return EMULATE_FAIL;

	switch (inst.r_format.opcode) {
	case cop0_op:
		er = kvm_vz_gpsi_cop0(inst, opc, cause, run, vcpu);
		break;
#ifndef CONFIG_CPU_MIPSR6
	case cache_op:
		trace_kvm_exit(vcpu, KVM_TRACE_EXIT_CACHE);
		er = kvm_vz_gpsi_cache(inst, opc, cause, run, vcpu);
		break;
#endif
	case spec3_op:
		switch (inst.spec3_format.func) {
#ifdef CONFIG_CPU_MIPSR6
		case cache6_op:
			trace_kvm_exit(vcpu, KVM_TRACE_EXIT_CACHE);
			er = kvm_vz_gpsi_cache(inst, opc, cause, run, vcpu);
			break;
#endif
		case rdhwr_op:
			if (inst.r_format.rs || (inst.r_format.re >> 3))
				goto unknown;

			rd = inst.r_format.rd;
			rt = inst.r_format.rt;
			sel = inst.r_format.re & 0x7;

			switch (rd) {
			case MIPS_HWR_CC:	/* Read count register */
				arch->gprs[rt] =
					(long)(int)kvm_mips_read_count(vcpu);
				break;
			default:
				trace_kvm_hwr(vcpu, KVM_TRACE_RDHWR,
					      KVM_TRACE_HWR(rd, sel), 0);
				goto unknown;
			}

			trace_kvm_hwr(vcpu, KVM_TRACE_RDHWR,
				      KVM_TRACE_HWR(rd, sel), arch->gprs[rt]);

			er = update_pc(vcpu, cause);
			break;
		default:
			goto unknown;
		}
		break;
unknown:

	default:
		kvm_err("GPSI exception not supported (%p/%#x)\n",
			opc, inst.word);
		kvm_arch_vcpu_dump_regs(vcpu);
		er = EMULATE_FAIL;
		break;
	}

	return er;
}
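/*
 * Editorial note (not from the original source): GSFC (Guest Software
 * Field Change) exits occur when the guest writes a CP0 field the hardware
 * will not apply silently (e.g. Status.FR or Cause.DC), giving root the
 * chance to vet and complete the write below.
 */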
static enum emulation_result kvm_trap_vz_handle_gsfc(u32 cause, u32 *opc,
						     struct kvm_vcpu *vcpu)
{
	enum emulation_result er = EMULATE_DONE;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	union mips_instruction inst;
	int err;

	/*
	 * Fetch the instruction.
	 */
	if (cause & CAUSEF_BD)
		opc += 1;
	err = kvm_get_badinstr(opc, vcpu, &inst.word);
	if (err)
		return EMULATE_FAIL;

	/* complete MTC0 on behalf of guest and advance EPC */
	if (inst.c0r_format.opcode == cop0_op &&
	    inst.c0r_format.rs == mtc_op &&
	    inst.c0r_format.z == 0) {
		int rt = inst.c0r_format.rt;
		int rd = inst.c0r_format.rd;
		int sel = inst.c0r_format.sel;
		unsigned int val = arch->gprs[rt];
		unsigned int old_val, change;

		trace_kvm_hwr(vcpu, KVM_TRACE_MTC0, KVM_TRACE_COP0(rd, sel),
			      val);

		if ((rd == MIPS_CP0_STATUS) && (sel == 0)) {
			/* FR bit should read as zero if no FPU */
			if (!kvm_mips_guest_has_fpu(&vcpu->arch))
				val &= ~(ST0_CU1 | ST0_FR);

			/*
			 * Also don't allow FR to be set if host doesn't support
			 * it.
			 */
			if (!(boot_cpu_data.fpu_id & MIPS_FPIR_F64))
				val &= ~ST0_FR;

			old_val = read_gc0_status();
			change = val ^ old_val;

			if (change & ST0_FR) {
				/*
				 * FPU and Vector register state is made
				 * UNPREDICTABLE by a change of FR, so don't
				 * even bother saving it.
				 */
				kvm_drop_fpu(vcpu);
			}

			/*
			 * If MSA state is already live, it is undefined how it
			 * interacts with FR=0 FPU state, and we don't want to
			 * hit reserved instruction exceptions trying to save
			 * the MSA state later when CU=1 && FR=1, so play it
			 * safe and save it first.
			 */
			if (change & ST0_CU1 && !(val & ST0_FR) &&
			    vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA)
				kvm_lose_fpu(vcpu);

			write_gc0_status(val);
		} else if ((rd == MIPS_CP0_CAUSE) && (sel == 0)) {
			u32 old_cause = read_gc0_cause();
			u32 change = old_cause ^ val;

			/* DC bit enabling/disabling timer? */
			if (change & CAUSEF_DC) {
				if (val & CAUSEF_DC)
					kvm_mips_count_disable_cause(vcpu);
				else
					kvm_mips_count_enable_cause(vcpu);
			}

			/* Only certain bits are RW to the guest */
			change &= (CAUSEF_DC | CAUSEF_IV | CAUSEF_WP |
				   CAUSEF_IP0 | CAUSEF_IP1);

			/* WP can only be cleared */
			change &= ~CAUSEF_WP | old_cause;

			write_gc0_cause(old_cause ^ change);
		} else if ((rd == MIPS_CP0_STATUS) && (sel == 1)) { /* IntCtl */
			write_gc0_intctl(val);
		} else if ((rd == MIPS_CP0_CONFIG) && (sel == 5)) {
			old_val = read_gc0_config5();
			change = val ^ old_val;
			/* Handle changes in FPU/MSA modes */
			preempt_disable();

			/*
			 * Propagate FRE changes immediately if the FPU
			 * context is already loaded.
			 */
			if (change & MIPS_CONF5_FRE &&
			    vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU)
				change_c0_config5(MIPS_CONF5_FRE, val);

			preempt_enable();

			val = old_val ^
				(change & kvm_vz_config5_guest_wrmask(vcpu));
			write_gc0_config5(val);
		} else {
			kvm_err("Handle GSFC, unsupported field change @ %p: %#x\n",
				opc, inst.word);
			er = EMULATE_FAIL;
		}

		if (er != EMULATE_FAIL)
			er = update_pc(vcpu, cause);
	} else {
		kvm_err("Handle GSFC, unrecognized instruction @ %p: %#x\n",
			opc, inst.word);
		er = EMULATE_FAIL;
	}

	return er;
}

static enum emulation_result kvm_trap_vz_handle_hc(u32 cause, u32 *opc,
						   struct kvm_vcpu *vcpu)
{
	enum emulation_result er;
	union mips_instruction inst;
	unsigned long curr_pc;
	int err;

	if (cause & CAUSEF_BD)
		opc += 1;
	err = kvm_get_badinstr(opc, vcpu, &inst.word);
	if (err)
		return EMULATE_FAIL;

	/*
	 * Update PC and hold onto current PC in case there is
	 * an error and we want to rollback the PC
	 */
	curr_pc = vcpu->arch.pc;
	er = update_pc(vcpu, cause);
	if (er == EMULATE_FAIL)
		return er;

	er = kvm_mips_emul_hypcall(vcpu, inst);
	if (er == EMULATE_FAIL)
		vcpu->arch.pc = curr_pc;

	return er;
}
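/*
 * Note: the PC is advanced past the HYPCALL instruction *before*
 * kvm_mips_emul_hypcall() runs, so when the hypercall completes (see the
 * EMULATE_HYPERCALL handling in kvm_trap_vz_handle_guest_exit() below) the
 * guest resumes at the following instruction; on failure the PC is rolled
 * back instead.
 */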
static enum emulation_result kvm_trap_vz_no_handler_guest_exit(u32 gexccode,
							       u32 cause,
							       u32 *opc,
							       struct kvm_vcpu *vcpu)
{
	u32 inst;

	/*
	 * Fetch the instruction.
	 */
	if (cause & CAUSEF_BD)
		opc += 1;
	kvm_get_badinstr(opc, vcpu, &inst);

	kvm_err("Guest Exception Code: %d not yet handled @ PC: %p, inst: 0x%08x Status: %#x\n",
		gexccode, opc, inst, read_gc0_status());

	return EMULATE_FAIL;
}

static int kvm_trap_vz_handle_guest_exit(struct kvm_vcpu *vcpu)
{
	u32 *opc = (u32 *) vcpu->arch.pc;
	u32 cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	u32 gexccode = (vcpu->arch.host_cp0_guestctl0 &
			MIPS_GCTL0_GEXC) >> MIPS_GCTL0_GEXC_SHIFT;
	int ret = RESUME_GUEST;

	trace_kvm_exit(vcpu, KVM_TRACE_EXIT_GEXCCODE_BASE + gexccode);
	switch (gexccode) {
	case MIPS_GCTL0_GEXC_GPSI:
		++vcpu->stat.vz_gpsi_exits;
		er = kvm_trap_vz_handle_gpsi(cause, opc, vcpu);
		break;
	case MIPS_GCTL0_GEXC_GSFC:
		++vcpu->stat.vz_gsfc_exits;
		er = kvm_trap_vz_handle_gsfc(cause, opc, vcpu);
		break;
	case MIPS_GCTL0_GEXC_HC:
		++vcpu->stat.vz_hc_exits;
		er = kvm_trap_vz_handle_hc(cause, opc, vcpu);
		break;
	case MIPS_GCTL0_GEXC_GRR:
		++vcpu->stat.vz_grr_exits;
		er = kvm_trap_vz_no_handler_guest_exit(gexccode, cause, opc,
						       vcpu);
		break;
	case MIPS_GCTL0_GEXC_GVA:
		++vcpu->stat.vz_gva_exits;
		er = kvm_trap_vz_no_handler_guest_exit(gexccode, cause, opc,
						       vcpu);
		break;
	case MIPS_GCTL0_GEXC_GHFC:
		++vcpu->stat.vz_ghfc_exits;
		er = kvm_trap_vz_no_handler_guest_exit(gexccode, cause, opc,
						       vcpu);
		break;
	case MIPS_GCTL0_GEXC_GPA:
		++vcpu->stat.vz_gpa_exits;
		er = kvm_trap_vz_no_handler_guest_exit(gexccode, cause, opc,
						       vcpu);
		break;
	default:
		++vcpu->stat.vz_resvd_exits;
		er = kvm_trap_vz_no_handler_guest_exit(gexccode, cause, opc,
						       vcpu);
		break;
	}

	if (er == EMULATE_DONE) {
		ret = RESUME_GUEST;
	} else if (er == EMULATE_HYPERCALL) {
		ret = kvm_mips_handle_hypcall(vcpu);
	} else {
		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}

/**
 * kvm_trap_vz_handle_cop_unusable() - Guest used unusable coprocessor.
 * @vcpu:	Virtual CPU context.
 *
 * Handle when the guest attempts to use a coprocessor which hasn't been
 * allowed by the root context.
 */
static int kvm_trap_vz_handle_cop_unusable(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	u32 cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_FAIL;
	int ret = RESUME_GUEST;

	if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 1) {
		/*
		 * If guest FPU not present, the FPU operation should have been
		 * treated as a reserved instruction!
		 * If FPU already in use, we shouldn't get this at all.
		 */
		if (WARN_ON(!kvm_mips_guest_has_fpu(&vcpu->arch) ||
			    vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU)) {
			preempt_enable();
			return EMULATE_FAIL;
		}

		kvm_own_fpu(vcpu);
		er = EMULATE_DONE;
	}
	/* other coprocessors not handled */

	switch (er) {
	case EMULATE_DONE:
		ret = RESUME_GUEST;
		break;

	case EMULATE_FAIL:
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
		break;

	default:
		BUG();
	}
	return ret;
}
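/*
 * Editorial note (not from the original source): kvm_own_fpu() lazily
 * restores the guest FPU context and enables the FPU in the root context,
 * so re-entering the guest retries the faulting instruction successfully;
 * the MSA handler below follows the same lazy-restore pattern.
 */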
/**
 * kvm_trap_vz_handle_msa_disabled() - Guest used MSA while disabled in root.
 * @vcpu:	Virtual CPU context.
 *
 * Handle when the guest attempts to use MSA when it is disabled in the root
 * context.
 */
static int kvm_trap_vz_handle_msa_disabled(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;

	/*
	 * If MSA not present or not exposed to guest or FR=0, the MSA operation
	 * should have been treated as a reserved instruction!
	 * Same if CU1=1, FR=0.
	 * If MSA already in use, we shouldn't get this at all.
	 */
	if (!kvm_mips_guest_has_msa(&vcpu->arch) ||
	    (read_gc0_status() & (ST0_CU1 | ST0_FR)) == ST0_CU1 ||
	    !(read_gc0_config5() & MIPS_CONF5_MSAEN) ||
	    vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) {
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		return RESUME_HOST;
	}

	kvm_own_msa(vcpu);

	return RESUME_GUEST;
}

static int kvm_trap_vz_handle_tlb_ld_miss(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	u32 *opc = (u32 *) vcpu->arch.pc;
	u32 cause = vcpu->arch.host_cp0_cause;
	ulong badvaddr = vcpu->arch.host_cp0_badvaddr;
	union mips_instruction inst;
	enum emulation_result er = EMULATE_DONE;
	int err, ret = RESUME_GUEST;

	if (kvm_mips_handle_vz_root_tlb_fault(badvaddr, vcpu, false)) {
		/* A code fetch fault doesn't count as an MMIO */
		if (kvm_is_ifetch_fault(&vcpu->arch)) {
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			return RESUME_HOST;
		}

		/* Fetch the instruction */
		if (cause & CAUSEF_BD)
			opc += 1;
		err = kvm_get_badinstr(opc, vcpu, &inst.word);
		if (err) {
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			return RESUME_HOST;
		}

		/* Treat as MMIO */
		er = kvm_mips_emulate_load(inst, cause, run, vcpu);
		if (er == EMULATE_FAIL) {
			kvm_err("Guest Emulate Load from MMIO space failed: PC: %p, BadVaddr: %#lx\n",
				opc, badvaddr);
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		}
	}

	if (er == EMULATE_DONE) {
		ret = RESUME_GUEST;
	} else if (er == EMULATE_DO_MMIO) {
		run->exit_reason = KVM_EXIT_MMIO;
		ret = RESUME_HOST;
	} else {
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}
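/*
 * Editorial note (not from the original source): the store-miss path below
 * differs from the load path above in one respect: it first retranslates
 * BadVAddr to a GPA in software (the hardware may have reported a GVA, per
 * kvm_vz_badvaddr_to_gpa()) and simply retries the access if that
 * translation fails.
 */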
static int kvm_trap_vz_handle_tlb_st_miss(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	u32 *opc = (u32 *) vcpu->arch.pc;
	u32 cause = vcpu->arch.host_cp0_cause;
	ulong badvaddr = vcpu->arch.host_cp0_badvaddr;
	union mips_instruction inst;
	enum emulation_result er = EMULATE_DONE;
	int err;
	int ret = RESUME_GUEST;

	/* Just try the access again if we couldn't do the translation */
	if (kvm_vz_badvaddr_to_gpa(vcpu, badvaddr, &badvaddr))
		return RESUME_GUEST;
	vcpu->arch.host_cp0_badvaddr = badvaddr;

	if (kvm_mips_handle_vz_root_tlb_fault(badvaddr, vcpu, true)) {
		/* Fetch the instruction */
		if (cause & CAUSEF_BD)
			opc += 1;
		err = kvm_get_badinstr(opc, vcpu, &inst.word);
		if (err) {
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			return RESUME_HOST;
		}

		/* Treat as MMIO */
		er = kvm_mips_emulate_store(inst, cause, run, vcpu);
		if (er == EMULATE_FAIL) {
			kvm_err("Guest Emulate Store to MMIO space failed: PC: %p, BadVaddr: %#lx\n",
				opc, badvaddr);
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		}
	}

	if (er == EMULATE_DONE) {
		ret = RESUME_GUEST;
	} else if (er == EMULATE_DO_MMIO) {
		run->exit_reason = KVM_EXIT_MMIO;
		ret = RESUME_HOST;
	} else {
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}

static u64 kvm_vz_get_one_regs[] = {
	KVM_REG_MIPS_CP0_INDEX,
	KVM_REG_MIPS_CP0_ENTRYLO0,
	KVM_REG_MIPS_CP0_ENTRYLO1,
	KVM_REG_MIPS_CP0_CONTEXT,
	KVM_REG_MIPS_CP0_PAGEMASK,
	KVM_REG_MIPS_CP0_PAGEGRAIN,
	KVM_REG_MIPS_CP0_WIRED,
	KVM_REG_MIPS_CP0_HWRENA,
	KVM_REG_MIPS_CP0_BADVADDR,
	KVM_REG_MIPS_CP0_COUNT,
	KVM_REG_MIPS_CP0_ENTRYHI,
	KVM_REG_MIPS_CP0_COMPARE,
	KVM_REG_MIPS_CP0_STATUS,
	KVM_REG_MIPS_CP0_INTCTL,
	KVM_REG_MIPS_CP0_CAUSE,
	KVM_REG_MIPS_CP0_EPC,
	KVM_REG_MIPS_CP0_PRID,
	KVM_REG_MIPS_CP0_EBASE,
	KVM_REG_MIPS_CP0_CONFIG,
	KVM_REG_MIPS_CP0_CONFIG1,
	KVM_REG_MIPS_CP0_CONFIG2,
	KVM_REG_MIPS_CP0_CONFIG3,
	KVM_REG_MIPS_CP0_CONFIG4,
	KVM_REG_MIPS_CP0_CONFIG5,
#ifdef CONFIG_64BIT
	KVM_REG_MIPS_CP0_XCONTEXT,
#endif
	KVM_REG_MIPS_CP0_ERROREPC,

	KVM_REG_MIPS_COUNT_CTL,
	KVM_REG_MIPS_COUNT_RESUME,
	KVM_REG_MIPS_COUNT_HZ,
};

static u64 kvm_vz_get_one_regs_kscratch[] = {
	KVM_REG_MIPS_CP0_KSCRATCH1,
	KVM_REG_MIPS_CP0_KSCRATCH2,
	KVM_REG_MIPS_CP0_KSCRATCH3,
	KVM_REG_MIPS_CP0_KSCRATCH4,
	KVM_REG_MIPS_CP0_KSCRATCH5,
	KVM_REG_MIPS_CP0_KSCRATCH6,
};

static unsigned long kvm_vz_num_regs(struct kvm_vcpu *vcpu)
{
	unsigned long ret;

	ret = ARRAY_SIZE(kvm_vz_get_one_regs);
	if (cpu_guest_has_userlocal)
		++ret;
	if (cpu_guest_has_badinstr)
		++ret;
	if (cpu_guest_has_badinstrp)
		++ret;
	ret += __arch_hweight8(cpu_data[0].guest.kscratch_mask);

	return ret;
}

static int kvm_vz_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices)
{
	u64 index;
	unsigned int i;

	if (copy_to_user(indices, kvm_vz_get_one_regs,
			 sizeof(kvm_vz_get_one_regs)))
		return -EFAULT;
	indices += ARRAY_SIZE(kvm_vz_get_one_regs);

	if (cpu_guest_has_userlocal) {
		index = KVM_REG_MIPS_CP0_USERLOCAL;
		if (copy_to_user(indices, &index, sizeof(index)))
			return -EFAULT;
		++indices;
	}
	if (cpu_guest_has_badinstr) {
		index = KVM_REG_MIPS_CP0_BADINSTR;
		if (copy_to_user(indices, &index, sizeof(index)))
			return -EFAULT;
		++indices;
	}
	if (cpu_guest_has_badinstrp) {
		index = KVM_REG_MIPS_CP0_BADINSTRP;
		if (copy_to_user(indices, &index, sizeof(index)))
			return -EFAULT;
		++indices;
	}
	for (i = 0; i < 6; ++i) {
		if (!cpu_guest_has_kscr(i + 2))
			continue;

		if (copy_to_user(indices, &kvm_vz_get_one_regs_kscratch[i],
				 sizeof(kvm_vz_get_one_regs_kscratch[i])))
			return -EFAULT;
		++indices;
	}

	return 0;
}

static inline s64 entrylo_kvm_to_user(unsigned long v)
{
	s64 mask, ret = v;

	if (BITS_PER_LONG == 32) {
		/*
		 * KVM API exposes 64-bit version of the register, so move the
		 * RI/XI bits up into place.
		 */
		mask = MIPS_ENTRYLO_RI | MIPS_ENTRYLO_XI;
		ret &= ~mask;
		ret |= ((s64)v & mask) << 32;
	}
	return ret;
}

static inline unsigned long entrylo_user_to_kvm(s64 v)
{
	unsigned long mask, ret = v;

	if (BITS_PER_LONG == 32) {
		/*
		 * KVM API exposes 64-bit version of the register, so move the
		 * RI/XI bits down into place.
		 */
		mask = MIPS_ENTRYLO_RI | MIPS_ENTRYLO_XI;
		ret &= ~mask;
		ret |= (v >> 32) & mask;
	}
	return ret;
}
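/*
 * Illustrative shuffle (not from the original source): on a 32-bit host
 * EntryLo.RI/XI live in bits 31/30, while the 64-bit KVM register view has
 * them architecturally in bits 63/62, hence the <<32/>>32 above. E.g. a
 * kernel EntryLo of 0x80000007 (RI set) is presented to userspace as
 * 0x8000000000000007.
 */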
static int kvm_vz_get_one_reg(struct kvm_vcpu *vcpu,
			      const struct kvm_one_reg *reg,
			      s64 *v)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	unsigned int idx;

	switch (reg->id) {
	case KVM_REG_MIPS_CP0_INDEX:
		*v = (long)read_gc0_index();
		break;
	case KVM_REG_MIPS_CP0_ENTRYLO0:
		*v = entrylo_kvm_to_user(read_gc0_entrylo0());
		break;
	case KVM_REG_MIPS_CP0_ENTRYLO1:
		*v = entrylo_kvm_to_user(read_gc0_entrylo1());
		break;
	case KVM_REG_MIPS_CP0_CONTEXT:
		*v = (long)read_gc0_context();
		break;
	case KVM_REG_MIPS_CP0_USERLOCAL:
		if (!cpu_guest_has_userlocal)
			return -EINVAL;
		*v = read_gc0_userlocal();
		break;
	case KVM_REG_MIPS_CP0_PAGEMASK:
		*v = (long)read_gc0_pagemask();
		break;
	case KVM_REG_MIPS_CP0_PAGEGRAIN:
		*v = (long)read_gc0_pagegrain();
		break;
	case KVM_REG_MIPS_CP0_WIRED:
		*v = (long)read_gc0_wired();
		break;
	case KVM_REG_MIPS_CP0_HWRENA:
		*v = (long)read_gc0_hwrena();
		break;
	case KVM_REG_MIPS_CP0_BADVADDR:
		*v = (long)read_gc0_badvaddr();
		break;
	case KVM_REG_MIPS_CP0_BADINSTR:
		if (!cpu_guest_has_badinstr)
			return -EINVAL;
		*v = read_gc0_badinstr();
		break;
	case KVM_REG_MIPS_CP0_BADINSTRP:
		if (!cpu_guest_has_badinstrp)
			return -EINVAL;
		*v = read_gc0_badinstrp();
		break;
	case KVM_REG_MIPS_CP0_COUNT:
		*v = kvm_mips_read_count(vcpu);
		break;
	case KVM_REG_MIPS_CP0_ENTRYHI:
		*v = (long)read_gc0_entryhi();
		break;
	case KVM_REG_MIPS_CP0_COMPARE:
		*v = (long)read_gc0_compare();
		break;
	case KVM_REG_MIPS_CP0_STATUS:
		*v = (long)read_gc0_status();
		break;
	case KVM_REG_MIPS_CP0_INTCTL:
		*v = read_gc0_intctl();
		break;
	case KVM_REG_MIPS_CP0_CAUSE:
		*v = (long)read_gc0_cause();
		break;
	case KVM_REG_MIPS_CP0_EPC:
		*v = (long)read_gc0_epc();
		break;
	case KVM_REG_MIPS_CP0_PRID:
		*v = (long)kvm_read_c0_guest_prid(cop0);
		break;
	case KVM_REG_MIPS_CP0_EBASE:
		*v = kvm_vz_read_gc0_ebase();
		break;
	case KVM_REG_MIPS_CP0_CONFIG:
		*v = read_gc0_config();
		break;
	case KVM_REG_MIPS_CP0_CONFIG1:
		if (!cpu_guest_has_conf1)
			return -EINVAL;
		*v = read_gc0_config1();
		break;
	case KVM_REG_MIPS_CP0_CONFIG2:
		if (!cpu_guest_has_conf2)
			return -EINVAL;
		*v = read_gc0_config2();
		break;
	case KVM_REG_MIPS_CP0_CONFIG3:
		if (!cpu_guest_has_conf3)
			return -EINVAL;
		*v = read_gc0_config3();
		break;
	case KVM_REG_MIPS_CP0_CONFIG4:
		if (!cpu_guest_has_conf4)
			return -EINVAL;
		*v = read_gc0_config4();
		break;
	case KVM_REG_MIPS_CP0_CONFIG5:
		if (!cpu_guest_has_conf5)
			return -EINVAL;
		*v = read_gc0_config5();
		break;
#ifdef CONFIG_64BIT
	case KVM_REG_MIPS_CP0_XCONTEXT:
		*v = read_gc0_xcontext();
		break;
#endif
	case KVM_REG_MIPS_CP0_ERROREPC:
		*v = (long)read_gc0_errorepc();
		break;
	case KVM_REG_MIPS_CP0_KSCRATCH1 ... KVM_REG_MIPS_CP0_KSCRATCH6:
		idx = reg->id - KVM_REG_MIPS_CP0_KSCRATCH1 + 2;
		if (!cpu_guest_has_kscr(idx))
			return -EINVAL;
		switch (idx) {
		case 2:
			*v = (long)read_gc0_kscratch1();
			break;
		case 3:
			*v = (long)read_gc0_kscratch2();
			break;
		case 4:
			*v = (long)read_gc0_kscratch3();
			break;
		case 5:
			*v = (long)read_gc0_kscratch4();
			break;
		case 6:
			*v = (long)read_gc0_kscratch5();
			break;
		case 7:
			*v = (long)read_gc0_kscratch6();
			break;
		}
		break;
	case KVM_REG_MIPS_COUNT_CTL:
		*v = vcpu->arch.count_ctl;
		break;
	case KVM_REG_MIPS_COUNT_RESUME:
		*v = ktime_to_ns(vcpu->arch.count_resume);
		break;
	case KVM_REG_MIPS_COUNT_HZ:
		*v = vcpu->arch.count_hz;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
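/*
 * Editorial note (not from the original source): the setter below is the
 * other half of the KVM_GET_ONE_REG/KVM_SET_ONE_REG ioctl interface, which
 * userspace (e.g. QEMU) uses to read and write individual guest CP0
 * registers, typically for reset or migration.
 */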
static int kvm_vz_set_one_reg(struct kvm_vcpu *vcpu,
			      const struct kvm_one_reg *reg,
			      s64 v)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	unsigned int idx;
	int ret = 0;
	unsigned int cur, change;

	switch (reg->id) {
	case KVM_REG_MIPS_CP0_INDEX:
		write_gc0_index(v);
		break;
	case KVM_REG_MIPS_CP0_ENTRYLO0:
		write_gc0_entrylo0(entrylo_user_to_kvm(v));
		break;
	case KVM_REG_MIPS_CP0_ENTRYLO1:
		write_gc0_entrylo1(entrylo_user_to_kvm(v));
		break;
	case KVM_REG_MIPS_CP0_CONTEXT:
		write_gc0_context(v);
		break;
	case KVM_REG_MIPS_CP0_USERLOCAL:
		if (!cpu_guest_has_userlocal)
			return -EINVAL;
		write_gc0_userlocal(v);
		break;
	case KVM_REG_MIPS_CP0_PAGEMASK:
		write_gc0_pagemask(v);
		break;
	case KVM_REG_MIPS_CP0_PAGEGRAIN:
		write_gc0_pagegrain(v);
		break;
	case KVM_REG_MIPS_CP0_WIRED:
		change_gc0_wired(MIPSR6_WIRED_WIRED, v);
		break;
	case KVM_REG_MIPS_CP0_HWRENA:
		write_gc0_hwrena(v);
		break;
	case KVM_REG_MIPS_CP0_BADVADDR:
		write_gc0_badvaddr(v);
		break;
	case KVM_REG_MIPS_CP0_BADINSTR:
		if (!cpu_guest_has_badinstr)
			return -EINVAL;
		write_gc0_badinstr(v);
		break;
	case KVM_REG_MIPS_CP0_BADINSTRP:
		if (!cpu_guest_has_badinstrp)
			return -EINVAL;
		write_gc0_badinstrp(v);
		break;
	case KVM_REG_MIPS_CP0_COUNT:
		kvm_mips_write_count(vcpu, v);
		break;
	case KVM_REG_MIPS_CP0_ENTRYHI:
		write_gc0_entryhi(v);
		break;
	case KVM_REG_MIPS_CP0_COMPARE:
		kvm_mips_write_compare(vcpu, v, false);
		break;
	case KVM_REG_MIPS_CP0_STATUS:
		write_gc0_status(v);
		break;
	case KVM_REG_MIPS_CP0_INTCTL:
		write_gc0_intctl(v);
		break;
	case KVM_REG_MIPS_CP0_CAUSE:
		/*
		 * If the timer is stopped or started (DC bit) it must look
		 * atomic with changes to the timer interrupt pending bit (TI).
		 * A timer interrupt should not happen in between.
		 */
		if ((read_gc0_cause() ^ v) & CAUSEF_DC) {
			if (v & CAUSEF_DC) {
				/* disable timer first */
				kvm_mips_count_disable_cause(vcpu);
				change_gc0_cause((u32)~CAUSEF_DC, v);
			} else {
				/* enable timer last */
				change_gc0_cause((u32)~CAUSEF_DC, v);
				kvm_mips_count_enable_cause(vcpu);
			}
		} else {
			write_gc0_cause(v);
		}
		break;
	case KVM_REG_MIPS_CP0_EPC:
		write_gc0_epc(v);
		break;
	case KVM_REG_MIPS_CP0_PRID:
		kvm_write_c0_guest_prid(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_EBASE:
		kvm_vz_write_gc0_ebase(v);
		break;
	case KVM_REG_MIPS_CP0_CONFIG:
		cur = read_gc0_config();
		change = (cur ^ v) & kvm_vz_config_user_wrmask(vcpu);
		if (change) {
			v = cur ^ change;
			write_gc0_config(v);
		}
		break;
	case KVM_REG_MIPS_CP0_CONFIG1:
		if (!cpu_guest_has_conf1)
			break;
		cur = read_gc0_config1();
		change = (cur ^ v) & kvm_vz_config1_user_wrmask(vcpu);
		if (change) {
			v = cur ^ change;
			write_gc0_config1(v);
		}
		break;
	case KVM_REG_MIPS_CP0_CONFIG2:
		if (!cpu_guest_has_conf2)
			break;
		cur = read_gc0_config2();
		change = (cur ^ v) & kvm_vz_config2_user_wrmask(vcpu);
		if (change) {
			v = cur ^ change;
			write_gc0_config2(v);
		}
		break;
	case KVM_REG_MIPS_CP0_CONFIG3:
		if (!cpu_guest_has_conf3)
			break;
		cur = read_gc0_config3();
		change = (cur ^ v) & kvm_vz_config3_user_wrmask(vcpu);
		if (change) {
			v = cur ^ change;
			write_gc0_config3(v);
		}
		break;
	case KVM_REG_MIPS_CP0_CONFIG4:
		if (!cpu_guest_has_conf4)
			break;
		cur = read_gc0_config4();
		change = (cur ^ v) & kvm_vz_config4_user_wrmask(vcpu);
		if (change) {
			v = cur ^ change;
			write_gc0_config4(v);
		}
		break;
	case KVM_REG_MIPS_CP0_CONFIG5:
		if (!cpu_guest_has_conf5)
			break;
		cur = read_gc0_config5();
		change = (cur ^ v) & kvm_vz_config5_user_wrmask(vcpu);
		if (change) {
			v = cur ^ change;
			write_gc0_config5(v);
		}
		break;
#ifdef CONFIG_64BIT
	case KVM_REG_MIPS_CP0_XCONTEXT:
		write_gc0_xcontext(v);
		break;
#endif
	case KVM_REG_MIPS_CP0_ERROREPC:
		write_gc0_errorepc(v);
		break;
	case KVM_REG_MIPS_CP0_KSCRATCH1 ... KVM_REG_MIPS_CP0_KSCRATCH6:
		idx = reg->id - KVM_REG_MIPS_CP0_KSCRATCH1 + 2;
		if (!cpu_guest_has_kscr(idx))
			return -EINVAL;
		switch (idx) {
		case 2:
			write_gc0_kscratch1(v);
			break;
		case 3:
			write_gc0_kscratch2(v);
			break;
		case 4:
			write_gc0_kscratch3(v);
			break;
		case 5:
			write_gc0_kscratch4(v);
			break;
		case 6:
			write_gc0_kscratch5(v);
			break;
		case 7:
			write_gc0_kscratch6(v);
			break;
		}
		break;
	case KVM_REG_MIPS_COUNT_CTL:
		ret = kvm_mips_set_count_ctl(vcpu, v);
		break;
	case KVM_REG_MIPS_COUNT_RESUME:
		ret = kvm_mips_set_count_resume(vcpu, v);
		break;
	case KVM_REG_MIPS_COUNT_HZ:
		ret = kvm_mips_set_count_hz(vcpu, v);
		break;
	default:
		return -EINVAL;
	}
	return ret;
}

#define guestid_cache(cpu)	(cpu_data[cpu].guestid_cache)
static void kvm_vz_get_new_guestid(unsigned long cpu, struct kvm_vcpu *vcpu)
{
	unsigned long guestid = guestid_cache(cpu);

	if (!(++guestid & GUESTID_MASK)) {
		if (cpu_has_vtag_icache)
			flush_icache_all();

		if (!guestid)		/* fix version if needed */
			guestid = GUESTID_FIRST_VERSION;

		++guestid;	/* guestid 0 reserved for root */

		/* start new guestid cycle */
		kvm_vz_local_flush_roottlb_all_guests();
		kvm_vz_local_flush_guesttlb_all();
	}

	guestid_cache(cpu) = guestid;
}
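/*
 * Illustrative numbers (not from the original source): with an 8-bit
 * GUESTID_MASK of 0xff, IDs are handed out in turn starting from version
 * 0x100; incrementing past 0x1ff clears the low bits, which bumps the
 * "version" in the high bits and forces the TLB flushes above, much like
 * the MIPS ASID allocator.
 */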
/* Returns 1 if the guest TLB may be clobbered */
static int kvm_vz_check_requests(struct kvm_vcpu *vcpu, int cpu)
{
	int ret = 0;
	int i;

	if (!vcpu->requests)
		return 0;

	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
		if (cpu_has_guestid) {
			/* Drop all GuestIDs for this VCPU */
			for_each_possible_cpu(i)
				vcpu->arch.vzguestid[i] = 0;
			/* This will clobber guest TLB contents too */
			ret = 1;
		}
		/*
		 * For Root ASID Dealias (RAD) we don't do anything here, but we
		 * still need the request to ensure we recheck asid_flush_mask.
		 * We can still return 0 as only the root TLB will be affected
		 * by a root ASID flush.
		 */
	}

	return ret;
}

static void kvm_vz_vcpu_save_wired(struct kvm_vcpu *vcpu)
{
	unsigned int wired = read_gc0_wired();
	struct kvm_mips_tlb *tlbs;
	int i;

	/* Expand the wired TLB array if necessary */
	wired &= MIPSR6_WIRED_WIRED;
	if (wired > vcpu->arch.wired_tlb_limit) {
		tlbs = krealloc(vcpu->arch.wired_tlb, wired *
				sizeof(*vcpu->arch.wired_tlb), GFP_ATOMIC);
		if (WARN_ON(!tlbs)) {
			/* Save whatever we can */
			wired = vcpu->arch.wired_tlb_limit;
		} else {
			vcpu->arch.wired_tlb = tlbs;
			vcpu->arch.wired_tlb_limit = wired;
		}
	}

	if (wired)
		/* Save wired entries from the guest TLB */
		kvm_vz_save_guesttlb(vcpu->arch.wired_tlb, 0, wired);

	/* Invalidate any dropped entries since last time */
	for (i = wired; i < vcpu->arch.wired_tlb_used; ++i) {
		vcpu->arch.wired_tlb[i].tlb_hi = UNIQUE_GUEST_ENTRYHI(i);
		vcpu->arch.wired_tlb[i].tlb_lo[0] = 0;
		vcpu->arch.wired_tlb[i].tlb_lo[1] = 0;
		vcpu->arch.wired_tlb[i].tlb_mask = 0;
	}
	vcpu->arch.wired_tlb_used = wired;
}
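/*
 * Editorial note (not from the original source): GFP_ATOMIC is used above
 * likely because this runs on the vcpu_put path with preemption disabled,
 * where sleeping allocations are not allowed; hence also the WARN_ON with
 * graceful truncation rather than an error return.
 */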
static void kvm_vz_vcpu_load_wired(struct kvm_vcpu *vcpu)
{
	/* Load wired entries into the guest TLB */
	if (vcpu->arch.wired_tlb)
		kvm_vz_load_guesttlb(vcpu->arch.wired_tlb, 0,
				     vcpu->arch.wired_tlb_used);
}

static void kvm_vz_vcpu_load_tlb(struct kvm_vcpu *vcpu, int cpu)
{
	struct kvm *kvm = vcpu->kvm;
	struct mm_struct *gpa_mm = &kvm->arch.gpa_mm;
	bool migrated;

	/*
	 * Are we entering guest context on a different CPU to last time?
	 * If so, the VCPU's guest TLB state on this CPU may be stale.
	 */
	migrated = (vcpu->arch.last_exec_cpu != cpu);
	vcpu->arch.last_exec_cpu = cpu;

	/*
	 * A vcpu's GuestID is set in GuestCtl1.ID when the vcpu is loaded and
	 * remains set until another vcpu is loaded in. As a rule GuestRID
	 * remains zeroed when in root context unless the kernel is busy
	 * manipulating guest tlb entries.
	 */
	if (cpu_has_guestid) {
		/*
		 * Check if our GuestID is of an older version and thus invalid.
		 *
		 * We also discard the stored GuestID if we've executed on
		 * another CPU, as the guest mappings may have changed without
		 * hypervisor knowledge.
		 */
		if (migrated ||
		    (vcpu->arch.vzguestid[cpu] ^ guestid_cache(cpu)) &
					GUESTID_VERSION_MASK) {
			kvm_vz_get_new_guestid(cpu, vcpu);
			vcpu->arch.vzguestid[cpu] = guestid_cache(cpu);
			trace_kvm_guestid_change(vcpu,
						 vcpu->arch.vzguestid[cpu]);
		}

		/* Restore GuestID */
		change_c0_guestctl1(GUESTID_MASK, vcpu->arch.vzguestid[cpu]);
	} else {
		/*
		 * The Guest TLB only stores a single guest's TLB state, so
		 * flush it if another VCPU has executed on this CPU.
		 *
		 * We also flush if we've executed on another CPU, as the guest
		 * mappings may have changed without hypervisor knowledge.
		 */
		if (migrated || last_exec_vcpu[cpu] != vcpu)
			kvm_vz_local_flush_guesttlb_all();
		last_exec_vcpu[cpu] = vcpu;

		/*
		 * Root ASID dealiases guest GPA mappings in the root TLB.
		 * Allocate new root ASID if needed.
		 */
		if (cpumask_test_and_clear_cpu(cpu, &kvm->arch.asid_flush_mask)
		    || (cpu_context(cpu, gpa_mm) ^ asid_cache(cpu)) &
					asid_version_mask(cpu))
			get_new_mmu_context(gpa_mm, cpu);
	}
}

static int kvm_vz_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	bool migrated, all;

	/*
	 * Have we migrated to a different CPU?
	 * If so, any old guest TLB state may be stale.
	 */
	migrated = (vcpu->arch.last_sched_cpu != cpu);

	/*
	 * Was this the last VCPU to run on this CPU?
	 * If not, any old guest state from this VCPU will have been clobbered.
	 */
	all = migrated || (last_vcpu[cpu] != vcpu);
	last_vcpu[cpu] = vcpu;

	/*
	 * Restore CP0_Wired unconditionally as we clear it after use, and
	 * restore wired guest TLB entries (while in guest context).
	 */
	kvm_restore_gc0_wired(cop0);
	if (current->flags & PF_VCPU) {
		tlbw_use_hazard();
		kvm_vz_vcpu_load_tlb(vcpu, cpu);
		kvm_vz_vcpu_load_wired(vcpu);
	}

	/*
	 * Restore timer state regardless, as e.g. Cause.TI can change over
	 * time if left unmaintained.
	 */
	kvm_vz_restore_timer(vcpu);

	/* Don't bother restoring registers multiple times unless necessary */
	if (!all)
		return 0;

	/*
	 * Restore config registers first, as some implementations restrict
	 * writes to other registers when the corresponding feature bits aren't
	 * set. For example Status.CU1 cannot be set unless Config1.FP is set.
	 */
	kvm_restore_gc0_config(cop0);
	if (cpu_guest_has_conf1)
		kvm_restore_gc0_config1(cop0);
	if (cpu_guest_has_conf2)
		kvm_restore_gc0_config2(cop0);
	if (cpu_guest_has_conf3)
		kvm_restore_gc0_config3(cop0);
	if (cpu_guest_has_conf4)
		kvm_restore_gc0_config4(cop0);
	if (cpu_guest_has_conf5)
		kvm_restore_gc0_config5(cop0);
	if (cpu_guest_has_conf6)
		kvm_restore_gc0_config6(cop0);
	if (cpu_guest_has_conf7)
		kvm_restore_gc0_config7(cop0);

	kvm_restore_gc0_index(cop0);
	kvm_restore_gc0_entrylo0(cop0);
	kvm_restore_gc0_entrylo1(cop0);
	kvm_restore_gc0_context(cop0);
#ifdef CONFIG_64BIT
	kvm_restore_gc0_xcontext(cop0);
#endif
	kvm_restore_gc0_pagemask(cop0);
	kvm_restore_gc0_pagegrain(cop0);
	kvm_restore_gc0_hwrena(cop0);
	kvm_restore_gc0_badvaddr(cop0);
	kvm_restore_gc0_entryhi(cop0);
	kvm_restore_gc0_status(cop0);
	kvm_restore_gc0_intctl(cop0);
	kvm_restore_gc0_epc(cop0);
	kvm_vz_write_gc0_ebase(kvm_read_sw_gc0_ebase(cop0));
	if (cpu_guest_has_userlocal)
		kvm_restore_gc0_userlocal(cop0);

	kvm_restore_gc0_errorepc(cop0);

	/* restore KScratch registers if enabled in guest */
	if (cpu_guest_has_conf4) {
		if (cpu_guest_has_kscr(2))
			kvm_restore_gc0_kscratch1(cop0);
		if (cpu_guest_has_kscr(3))
			kvm_restore_gc0_kscratch2(cop0);
		if (cpu_guest_has_kscr(4))
			kvm_restore_gc0_kscratch3(cop0);
		if (cpu_guest_has_kscr(5))
			kvm_restore_gc0_kscratch4(cop0);
		if (cpu_guest_has_kscr(6))
			kvm_restore_gc0_kscratch5(cop0);
		if (cpu_guest_has_kscr(7))
			kvm_restore_gc0_kscratch6(cop0);
	}

	if (cpu_guest_has_badinstr)
		kvm_restore_gc0_badinstr(cop0);
	if (cpu_guest_has_badinstrp)
		kvm_restore_gc0_badinstrp(cop0);

	/* restore Root.GuestCtl2 from unused Guest guestctl2 register */
	if (cpu_has_guestctl2)
		write_c0_guestctl2(
			cop0->reg[MIPS_CP0_GUESTCTL2][MIPS_CP0_GUESTCTL2_SEL]);

	return 0;
}
static int kvm_vz_vcpu_put(struct kvm_vcpu *vcpu, int cpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;

	if (current->flags & PF_VCPU)
		kvm_vz_vcpu_save_wired(vcpu);

	kvm_lose_fpu(vcpu);

	kvm_save_gc0_index(cop0);
	kvm_save_gc0_entrylo0(cop0);
	kvm_save_gc0_entrylo1(cop0);
	kvm_save_gc0_context(cop0);
#ifdef CONFIG_64BIT
	kvm_save_gc0_xcontext(cop0);
#endif
	kvm_save_gc0_pagemask(cop0);
	kvm_save_gc0_pagegrain(cop0);
	kvm_save_gc0_wired(cop0);
	/* allow wired TLB entries to be overwritten */
	clear_gc0_wired(MIPSR6_WIRED_WIRED);
	kvm_save_gc0_hwrena(cop0);
	kvm_save_gc0_badvaddr(cop0);
	kvm_save_gc0_entryhi(cop0);
	kvm_save_gc0_status(cop0);
	kvm_save_gc0_intctl(cop0);
	kvm_save_gc0_epc(cop0);
	kvm_write_sw_gc0_ebase(cop0, kvm_vz_read_gc0_ebase());
	if (cpu_guest_has_userlocal)
		kvm_save_gc0_userlocal(cop0);

	/* only save implemented config registers */
	kvm_save_gc0_config(cop0);
	if (cpu_guest_has_conf1)
		kvm_save_gc0_config1(cop0);
	if (cpu_guest_has_conf2)
		kvm_save_gc0_config2(cop0);
	if (cpu_guest_has_conf3)
		kvm_save_gc0_config3(cop0);
	if (cpu_guest_has_conf4)
		kvm_save_gc0_config4(cop0);
	if (cpu_guest_has_conf5)
		kvm_save_gc0_config5(cop0);
	if (cpu_guest_has_conf6)
		kvm_save_gc0_config6(cop0);
	if (cpu_guest_has_conf7)
		kvm_save_gc0_config7(cop0);

	kvm_save_gc0_errorepc(cop0);

	/* save KScratch registers if enabled in guest */
	if (cpu_guest_has_conf4) {
		if (cpu_guest_has_kscr(2))
			kvm_save_gc0_kscratch1(cop0);
		if (cpu_guest_has_kscr(3))
			kvm_save_gc0_kscratch2(cop0);
		if (cpu_guest_has_kscr(4))
			kvm_save_gc0_kscratch3(cop0);
		if (cpu_guest_has_kscr(5))
			kvm_save_gc0_kscratch4(cop0);
		if (cpu_guest_has_kscr(6))
			kvm_save_gc0_kscratch5(cop0);
		if (cpu_guest_has_kscr(7))
			kvm_save_gc0_kscratch6(cop0);
	}

	if (cpu_guest_has_badinstr)
		kvm_save_gc0_badinstr(cop0);
	if (cpu_guest_has_badinstrp)
		kvm_save_gc0_badinstrp(cop0);

	kvm_vz_save_timer(vcpu);

	/* save Root.GuestCtl2 in unused Guest guestctl2 register */
	if (cpu_has_guestctl2)
		cop0->reg[MIPS_CP0_GUESTCTL2][MIPS_CP0_GUESTCTL2_SEL] =
			read_c0_guestctl2();

	return 0;
}
/**
 * kvm_vz_resize_guest_vtlb() - Attempt to resize guest VTLB.
 * @size:	Number of guest VTLB entries (0 < @size <= root VTLB entries).
 *
 * Attempt to resize the guest VTLB by writing guest Config registers. This is
 * necessary for cores with a shared root/guest TLB to avoid overlap with wired
 * entries in the root VTLB.
 *
 * Returns:	The resulting guest VTLB size.
 */
static unsigned int kvm_vz_resize_guest_vtlb(unsigned int size)
{
	unsigned int config4 = 0, ret = 0, limit;

	/* Write MMUSize - 1 into guest Config registers */
	if (cpu_guest_has_conf1)
		change_gc0_config1(MIPS_CONF1_TLBS,
				   (size - 1) << MIPS_CONF1_TLBS_SHIFT);
	if (cpu_guest_has_conf4) {
		config4 = read_gc0_config4();
		if (cpu_has_mips_r6 || (config4 & MIPS_CONF4_MMUEXTDEF) ==
		    MIPS_CONF4_MMUEXTDEF_VTLBSIZEEXT) {
			config4 &= ~MIPS_CONF4_VTLBSIZEEXT;
			config4 |= ((size - 1) >> MIPS_CONF1_TLBS_SIZE) <<
				MIPS_CONF4_VTLBSIZEEXT_SHIFT;
		} else if ((config4 & MIPS_CONF4_MMUEXTDEF) ==
			   MIPS_CONF4_MMUEXTDEF_MMUSIZEEXT) {
			config4 &= ~MIPS_CONF4_MMUSIZEEXT;
			config4 |= ((size - 1) >> MIPS_CONF1_TLBS_SIZE) <<
				MIPS_CONF4_MMUSIZEEXT_SHIFT;
		}
		write_gc0_config4(config4);
	}

	/*
	 * Set Guest.Wired.Limit = 0 (no limit up to Guest.MMUSize-1), unless it
	 * would exceed Root.Wired.Limit (clearing Guest.Wired.Wired so write
	 * not dropped)
	 */
	if (cpu_has_mips_r6) {
		limit = (read_c0_wired() & MIPSR6_WIRED_LIMIT) >>
						MIPSR6_WIRED_LIMIT_SHIFT;
		if (size - 1 <= limit)
			limit = 0;
		write_gc0_wired(limit << MIPSR6_WIRED_LIMIT_SHIFT);
	}

	/* Read back MMUSize - 1 */
	back_to_back_c0_hazard();
	if (cpu_guest_has_conf1)
		ret = (read_gc0_config1() & MIPS_CONF1_TLBS) >>
						MIPS_CONF1_TLBS_SHIFT;
	if (config4) {
		if (cpu_has_mips_r6 || (config4 & MIPS_CONF4_MMUEXTDEF) ==
		    MIPS_CONF4_MMUEXTDEF_VTLBSIZEEXT)
			ret |= ((config4 & MIPS_CONF4_VTLBSIZEEXT) >>
				MIPS_CONF4_VTLBSIZEEXT_SHIFT) <<
				MIPS_CONF1_TLBS_SIZE;
		else if ((config4 & MIPS_CONF4_MMUEXTDEF) ==
			 MIPS_CONF4_MMUEXTDEF_MMUSIZEEXT)
			ret |= ((config4 & MIPS_CONF4_MMUSIZEEXT) >>
				MIPS_CONF4_MMUSIZEEXT_SHIFT) <<
				MIPS_CONF1_TLBS_SIZE;
	}
	return ret + 1;
}
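/*
 * Illustrative encoding (not from the original source): MMUSize-1 is split
 * across Config1.MMUSize (the low MIPS_CONF1_TLBS_SIZE = 6 bits) and a
 * Config4 extension field, so a request for 128 VTLB entries writes
 * 127 & 0x3f = 63 into Config1 and 127 >> 6 = 1 into VTLBSizeExt.
 */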
static int kvm_vz_hardware_enable(void)
{
	unsigned int mmu_size, guest_mmu_size, ftlb_size;

	/*
	 * ImgTec cores tend to use a shared root/guest TLB. To avoid overlap
	 * of root wired and guest entries, the guest TLB may need resizing.
	 */
	mmu_size = current_cpu_data.tlbsizevtlb;
	ftlb_size = current_cpu_data.tlbsize - mmu_size;

	/* Try switching to maximum guest VTLB size for flush */
	guest_mmu_size = kvm_vz_resize_guest_vtlb(mmu_size);
	current_cpu_data.guest.tlbsize = guest_mmu_size + ftlb_size;
	kvm_vz_local_flush_guesttlb_all();

	/*
	 * Reduce to make space for root wired entries and at least 2 root
	 * non-wired entries. This does assume that long-term wired entries
	 * won't be added later.
	 */
	guest_mmu_size = mmu_size - num_wired_entries() - 2;
	guest_mmu_size = kvm_vz_resize_guest_vtlb(guest_mmu_size);
	current_cpu_data.guest.tlbsize = guest_mmu_size + ftlb_size;

	/*
	 * Write the VTLB size, but if another CPU has already written, check
	 * it matches or we won't provide a consistent view to the guest. If
	 * this ever happens it suggests an asymmetric number of wired entries.
	 */
	if (cmpxchg(&kvm_vz_guest_vtlb_size, 0, guest_mmu_size) &&
	    WARN(guest_mmu_size != kvm_vz_guest_vtlb_size,
		 "Available guest VTLB size mismatch"))
		return -EINVAL;

	/*
	 * Enable virtualization features granting guest direct control of
	 * certain features:
	 * CP0=1:	Guest coprocessor 0 context.
	 * AT=Guest:	Guest MMU.
	 * CG=1:	Hit (virtual address) CACHE operations (optional).
	 * CF=1:	Guest Config registers.
	 * CGI=1:	Indexed flush CACHE operations (optional).
	 */
	write_c0_guestctl0(MIPS_GCTL0_CP0 |
			   (MIPS_GCTL0_AT_GUEST << MIPS_GCTL0_AT_SHIFT) |
			   MIPS_GCTL0_CG | MIPS_GCTL0_CF);
	if (cpu_has_guestctl0ext)
		set_c0_guestctl0ext(MIPS_GCTL0EXT_CGI);

	if (cpu_has_guestid) {
		write_c0_guestctl1(0);
		kvm_vz_local_flush_roottlb_all_guests();

		GUESTID_MASK = current_cpu_data.guestid_mask;
		GUESTID_FIRST_VERSION = GUESTID_MASK + 1;
		GUESTID_VERSION_MASK = ~GUESTID_MASK;

		current_cpu_data.guestid_cache = GUESTID_FIRST_VERSION;
	}

	/* clear any pending injected virtual guest interrupts */
	if (cpu_has_guestctl2)
		clear_c0_guestctl2(0x3f << 10);

	return 0;
}

static void kvm_vz_hardware_disable(void)
{
	kvm_vz_local_flush_guesttlb_all();

	if (cpu_has_guestid) {
		write_c0_guestctl1(0);
		kvm_vz_local_flush_roottlb_all_guests();
	}
}

static int kvm_vz_check_extension(struct kvm *kvm, long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_MIPS_VZ:
		/* we wouldn't be here unless cpu_has_vz */
		r = 1;
		break;
#ifdef CONFIG_64BIT
	case KVM_CAP_MIPS_64BIT:
		/* We support 64-bit registers/operations and addresses */
		r = 2;
		break;
#endif
	default:
		r = 0;
		break;
	}

	return r;
}

static int kvm_vz_vcpu_init(struct kvm_vcpu *vcpu)
{
	int i;

	for_each_possible_cpu(i)
		vcpu->arch.vzguestid[i] = 0;

	return 0;
}

static void kvm_vz_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	int cpu;

	/*
	 * If the VCPU is freed and reused as another VCPU, we don't want the
	 * matching pointer wrongly hanging around in last_vcpu[] or
	 * last_exec_vcpu[].
	 */
	for_each_possible_cpu(cpu) {
		if (last_vcpu[cpu] == vcpu)
			last_vcpu[cpu] = NULL;
		if (last_exec_vcpu[cpu] == vcpu)
			last_exec_vcpu[cpu] = NULL;
	}
}
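/*
 * A note on the GuestID bookkeeping (inferred from the code above, not
 * normative): GUESTID_FIRST_VERSION == GUESTID_MASK + 1 and
 * GUESTID_VERSION_MASK == ~GUESTID_MASK split guestid_cache into a hardware
 * GuestID in the low bits and a generation count in the bits above, much
 * like the MIPS ASID allocator. A zeroed vcpu->arch.vzguestid[] entry from
 * kvm_vz_vcpu_init() can never match a live generation, so the first
 * vcpu_load on each physical CPU is forced to allocate a fresh GuestID.
 */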
static int kvm_vz_vcpu_setup(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	unsigned long count_hz = 100*1000*1000; /* default to 100 MHz */

	/*
	 * Start off the timer at the same frequency as the host timer, but the
	 * soft timer doesn't handle frequencies greater than 1GHz yet.
	 */
	if (mips_hpt_frequency && mips_hpt_frequency <= NSEC_PER_SEC)
		count_hz = mips_hpt_frequency;
	kvm_mips_init_count(vcpu, count_hz);

	/*
	 * Initialize guest register state to valid architectural reset state.
	 */

	/* PageGrain */
	if (cpu_has_mips_r6)
		kvm_write_sw_gc0_pagegrain(cop0, PG_RIE | PG_XIE | PG_IEC);
	/* Wired */
	if (cpu_has_mips_r6)
		kvm_write_sw_gc0_wired(cop0,
				       read_gc0_wired() & MIPSR6_WIRED_LIMIT);
	/* Status */
	kvm_write_sw_gc0_status(cop0, ST0_BEV | ST0_ERL);
	if (cpu_has_mips_r6)
		kvm_change_sw_gc0_status(cop0, ST0_FR, read_gc0_status());
	/* IntCtl */
	kvm_write_sw_gc0_intctl(cop0, read_gc0_intctl() &
				(INTCTLF_IPFDC | INTCTLF_IPPCI | INTCTLF_IPTI));
	/* PRId */
	kvm_write_sw_gc0_prid(cop0, boot_cpu_data.processor_id);
	/* EBase */
	kvm_write_sw_gc0_ebase(cop0, (s32)0x80000000 | vcpu->vcpu_id);
	/* Config */
	kvm_save_gc0_config(cop0);
	/* architecturally writable (e.g. from guest) */
	kvm_change_sw_gc0_config(cop0, CONF_CM_CMASK,
				 _page_cachable_default >> _CACHE_SHIFT);
	/* architecturally read only, but maybe writable from root */
	kvm_change_sw_gc0_config(cop0, MIPS_CONF_MT, read_c0_config());
	if (cpu_guest_has_conf1) {
		kvm_set_sw_gc0_config(cop0, MIPS_CONF_M);
		/* Config1 */
		kvm_save_gc0_config1(cop0);
		/* architecturally read only, but maybe writable from root */
		kvm_clear_sw_gc0_config1(cop0, MIPS_CONF1_C2 |
					       MIPS_CONF1_MD |
					       MIPS_CONF1_PC |
					       MIPS_CONF1_WR |
					       MIPS_CONF1_CA |
					       MIPS_CONF1_FP);
	}
	if (cpu_guest_has_conf2) {
		kvm_set_sw_gc0_config1(cop0, MIPS_CONF_M);
		/* Config2 */
		kvm_save_gc0_config2(cop0);
	}
	if (cpu_guest_has_conf3) {
		kvm_set_sw_gc0_config2(cop0, MIPS_CONF_M);
		/* Config3 */
		kvm_save_gc0_config3(cop0);
		/* architecturally writable (e.g. from guest) */
		kvm_clear_sw_gc0_config3(cop0, MIPS_CONF3_ISA_OE);
		/* architecturally read only, but maybe writable from root */
		kvm_clear_sw_gc0_config3(cop0, MIPS_CONF3_MSA |
					       MIPS_CONF3_BPG |
					       MIPS_CONF3_ULRI |
					       MIPS_CONF3_DSP |
					       MIPS_CONF3_CTXTC |
					       MIPS_CONF3_ITL |
					       MIPS_CONF3_LPA |
					       MIPS_CONF3_VEIC |
					       MIPS_CONF3_VINT |
					       MIPS_CONF3_SP |
					       MIPS_CONF3_CDMM |
					       MIPS_CONF3_MT |
					       MIPS_CONF3_SM |
					       MIPS_CONF3_TL);
	}
	if (cpu_guest_has_conf4) {
		kvm_set_sw_gc0_config3(cop0, MIPS_CONF_M);
		/* Config4 */
		kvm_save_gc0_config4(cop0);
	}
	if (cpu_guest_has_conf5) {
		kvm_set_sw_gc0_config4(cop0, MIPS_CONF_M);
		/* Config5 */
		kvm_save_gc0_config5(cop0);
		/* architecturally writable (e.g. from guest) */
		kvm_clear_sw_gc0_config5(cop0, MIPS_CONF5_K |
					       MIPS_CONF5_CV |
					       MIPS_CONF5_MSAEN |
					       MIPS_CONF5_UFE |
					       MIPS_CONF5_FRE |
					       MIPS_CONF5_SBRI |
					       MIPS_CONF5_UFR);
		/* architecturally read only, but maybe writable from root */
		kvm_clear_sw_gc0_config5(cop0, MIPS_CONF5_MRP);
	}

	/* start with no pending virtual guest interrupts */
	if (cpu_has_guestctl2)
		cop0->reg[MIPS_CP0_GUESTCTL2][MIPS_CP0_GUESTCTL2_SEL] = 0;

	/* Put PC at reset vector */
	vcpu->arch.pc = CKSEG1ADDR(0x1fc00000);

	return 0;
}
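/*
 * For reference, the reset vector set above: CKSEG1ADDR(0x1fc00000) is the
 * uncached kseg1 alias of physical address 0x1fc00000, i.e. virtual address
 * 0xbfc00000 (sign-extended on 64-bit kernels), which is the architectural
 * MIPS reset exception vector the guest begins executing from.
 */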
static void kvm_vz_flush_shadow_all(struct kvm *kvm)
{
	if (cpu_has_guestid) {
		/* Flush GuestID for each VCPU individually */
		kvm_flush_remote_tlbs(kvm);
	} else {
		/*
		 * For each CPU there is a single GPA ASID used by all VCPUs in
		 * the VM, so it doesn't make sense for the VCPUs to handle
		 * invalidation of these ASIDs individually.
		 *
		 * Instead mark all CPUs as needing ASID invalidation in
		 * asid_flush_mask, and just use kvm_flush_remote_tlbs(kvm) to
		 * kick any running VCPUs so they check asid_flush_mask.
		 */
		cpumask_setall(&kvm->arch.asid_flush_mask);
		kvm_flush_remote_tlbs(kvm);
	}
}

static void kvm_vz_flush_shadow_memslot(struct kvm *kvm,
					const struct kvm_memory_slot *slot)
{
	kvm_vz_flush_shadow_all(kvm);
}

static void kvm_vz_vcpu_reenter(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	int cpu = smp_processor_id();
	int preserve_guest_tlb;

	preserve_guest_tlb = kvm_vz_check_requests(vcpu, cpu);

	if (preserve_guest_tlb)
		kvm_vz_vcpu_save_wired(vcpu);

	kvm_vz_vcpu_load_tlb(vcpu, cpu);

	if (preserve_guest_tlb)
		kvm_vz_vcpu_load_wired(vcpu);
}

static int kvm_vz_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	int cpu = smp_processor_id();
	int r;

	/* Check if we have any exceptions/interrupts pending */
	kvm_mips_deliver_interrupts(vcpu, read_gc0_cause());

	kvm_vz_check_requests(vcpu, cpu);
	kvm_vz_vcpu_load_tlb(vcpu, cpu);
	kvm_vz_vcpu_load_wired(vcpu);

	r = vcpu->arch.vcpu_run(run, vcpu);

	kvm_vz_vcpu_save_wired(vcpu);

	return r;
}

static struct kvm_mips_callbacks kvm_vz_callbacks = {
	.handle_cop_unusable = kvm_trap_vz_handle_cop_unusable,
	.handle_tlb_mod = kvm_trap_vz_handle_tlb_st_miss,
	.handle_tlb_ld_miss = kvm_trap_vz_handle_tlb_ld_miss,
	.handle_tlb_st_miss = kvm_trap_vz_handle_tlb_st_miss,
	.handle_addr_err_st = kvm_trap_vz_no_handler,
	.handle_addr_err_ld = kvm_trap_vz_no_handler,
	.handle_syscall = kvm_trap_vz_no_handler,
	.handle_res_inst = kvm_trap_vz_no_handler,
	.handle_break = kvm_trap_vz_no_handler,
	.handle_msa_disabled = kvm_trap_vz_handle_msa_disabled,
	.handle_guest_exit = kvm_trap_vz_handle_guest_exit,

	.hardware_enable = kvm_vz_hardware_enable,
	.hardware_disable = kvm_vz_hardware_disable,
	.check_extension = kvm_vz_check_extension,
	.vcpu_init = kvm_vz_vcpu_init,
	.vcpu_uninit = kvm_vz_vcpu_uninit,
	.vcpu_setup = kvm_vz_vcpu_setup,
	.flush_shadow_all = kvm_vz_flush_shadow_all,
	.flush_shadow_memslot = kvm_vz_flush_shadow_memslot,
	.gva_to_gpa = kvm_vz_gva_to_gpa_cb,
	.queue_timer_int = kvm_vz_queue_timer_int_cb,
	.dequeue_timer_int = kvm_vz_dequeue_timer_int_cb,
	.queue_io_int = kvm_vz_queue_io_int_cb,
	.dequeue_io_int = kvm_vz_dequeue_io_int_cb,
	.irq_deliver = kvm_vz_irq_deliver_cb,
	.irq_clear = kvm_vz_irq_clear_cb,
	.num_regs = kvm_vz_num_regs,
	.copy_reg_indices = kvm_vz_copy_reg_indices,
	.get_one_reg = kvm_vz_get_one_reg,
	.set_one_reg = kvm_vz_set_one_reg,
	.vcpu_load = kvm_vz_vcpu_load,
	.vcpu_put = kvm_vz_vcpu_put,
	.vcpu_run = kvm_vz_vcpu_run,
	.vcpu_reenter = kvm_vz_vcpu_reenter,
};

int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks)
{
	if (!cpu_has_vz)
		return -ENODEV;

	/*
	 * VZ requires at least 2 KScratch registers, so it should have been
	 * possible to allocate pgd_reg.
	 */
	if (WARN(pgd_reg == -1,
		 "pgd_reg not allocated even though cpu_has_vz\n"))
		return -ENODEV;

	pr_info("Starting KVM with MIPS VZ extensions\n");

	*install_callbacks = &kvm_vz_callbacks;
	return 0;
}
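/*
 * Usage sketch (illustrative, not part of this file): the generic MIPS KVM
 * code is expected to install these callbacks once at init time, roughly:
 *
 *	int kvm_arch_init(void *opaque)
 *	{
 *		return kvm_mips_emulation_init(&kvm_mips_callbacks);
 *	}
 *
 * after which all vcpu operations are dispatched through kvm_vz_callbacks.
 */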