/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS: Support for hardware virtualization extensions
 *
 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
 * Authors: Yann Le Du <ledu@kymasys.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/preempt.h>
#include <linux/vmalloc.h>
#include <asm/cacheflush.h>
#include <asm/cacheops.h>
#include <asm/cmpxchg.h>
#include <asm/fpu.h>
#include <asm/hazards.h>
#include <asm/inst.h>
#include <asm/mmu_context.h>
#include <asm/r4kcache.h>
#include <asm/time.h>
#include <asm/tlb.h>
#include <asm/tlbex.h>

#include <linux/kvm_host.h>

#include "interrupt.h"

#include "trace.h"

/* Pointers to last VCPU loaded on each physical CPU */
static struct kvm_vcpu *last_vcpu[NR_CPUS];
/* Pointers to last VCPU executed on each physical CPU */
static struct kvm_vcpu *last_exec_vcpu[NR_CPUS];

/*
 * Number of guest VTLB entries to use, so we can catch inconsistency between
 * CPUs.
 */
static unsigned int kvm_vz_guest_vtlb_size;

static inline long kvm_vz_read_gc0_ebase(void)
{
	if (sizeof(long) == 8 && cpu_has_ebase_wg)
		return read_gc0_ebase_64();
	else
		return read_gc0_ebase();
}

static inline void kvm_vz_write_gc0_ebase(long v)
{
	/*
	 * First write with WG=1 to write upper bits, then write again in case
	 * WG should be left at 0.
	 * write_gc0_ebase_64() is no longer UNDEFINED since R6.
	 */
	if (sizeof(long) == 8 &&
	    (cpu_has_mips64r6 || cpu_has_ebase_wg)) {
		write_gc0_ebase_64(v | MIPS_EBASE_WG);
		write_gc0_ebase_64(v);
	} else {
		write_gc0_ebase(v | MIPS_EBASE_WG);
		write_gc0_ebase(v);
	}
}

/*
 * These Config bits may be writable by the guest:
 * Config:	[K23, KU] (!TLB), K0
 * Config1:	(none)
 * Config2:	[TU, SU] (impl)
 * Config3:	ISAOnExc
 * Config4:	FTLBPageSize
 * Config5:	K, CV, MSAEn, UFE, FRE, SBRI, UFR
 */
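/*
 * Note on the first mask below: CONF_CM_CMASK covers the Config.K0 field
 * (bits 2:0), the kseg0 cacheability and coherency attribute -- per the
 * table above, the only Config field the guest may write directly.
 */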
static inline unsigned int kvm_vz_config_guest_wrmask(struct kvm_vcpu *vcpu)
{
	return CONF_CM_CMASK;
}

static inline unsigned int kvm_vz_config1_guest_wrmask(struct kvm_vcpu *vcpu)
{
	return 0;
}

static inline unsigned int kvm_vz_config2_guest_wrmask(struct kvm_vcpu *vcpu)
{
	return 0;
}

static inline unsigned int kvm_vz_config3_guest_wrmask(struct kvm_vcpu *vcpu)
{
	return MIPS_CONF3_ISA_OE;
}

static inline unsigned int kvm_vz_config4_guest_wrmask(struct kvm_vcpu *vcpu)
{
	/* no need to be exact */
	return MIPS_CONF4_VFTLBPAGESIZE;
}

static inline unsigned int kvm_vz_config5_guest_wrmask(struct kvm_vcpu *vcpu)
{
	unsigned int mask = MIPS_CONF5_K | MIPS_CONF5_CV | MIPS_CONF5_SBRI;

	/* Permit MSAEn changes if MSA supported and enabled */
	if (kvm_mips_guest_has_msa(&vcpu->arch))
		mask |= MIPS_CONF5_MSAEN;

	/*
	 * Permit guest FPU mode changes if FPU is enabled and the relevant
	 * feature exists according to FIR register.
	 */
	if (kvm_mips_guest_has_fpu(&vcpu->arch)) {
		if (cpu_has_ufr)
			mask |= MIPS_CONF5_UFR;
		if (cpu_has_fre)
			mask |= MIPS_CONF5_FRE | MIPS_CONF5_UFE;
	}

	return mask;
}

/*
 * VZ optionally allows these additional Config bits to be written by root:
 * Config:	M, [MT]
 * Config1:	M, [MMUSize-1, C2, MD, PC, WR, CA], FP
 * Config2:	M
 * Config3:	M, MSAP, [BPG], ULRI, [DSP2P, DSPP], CTXTC, [ITL, LPA, VEIC,
 *		VInt, SP, CDMM, MT, SM, TL]
 * Config4:	M, [VTLBSizeExt, MMUSizeExt]
 * Config5:	[MRP]
 */

static inline unsigned int kvm_vz_config_user_wrmask(struct kvm_vcpu *vcpu)
{
	return kvm_vz_config_guest_wrmask(vcpu) | MIPS_CONF_M;
}

static inline unsigned int kvm_vz_config1_user_wrmask(struct kvm_vcpu *vcpu)
{
	unsigned int mask = kvm_vz_config1_guest_wrmask(vcpu) | MIPS_CONF_M;

	/* Permit FPU to be present if FPU is supported */
	if (kvm_mips_guest_can_have_fpu(&vcpu->arch))
		mask |= MIPS_CONF1_FP;

	return mask;
}

static inline unsigned int kvm_vz_config2_user_wrmask(struct kvm_vcpu *vcpu)
{
	return kvm_vz_config2_guest_wrmask(vcpu) | MIPS_CONF_M;
}

static inline unsigned int kvm_vz_config3_user_wrmask(struct kvm_vcpu *vcpu)
{
	unsigned int mask = kvm_vz_config3_guest_wrmask(vcpu) | MIPS_CONF_M |
		MIPS_CONF3_ULRI | MIPS_CONF3_CTXTC;

	/* Permit MSA to be present if MSA is supported */
	if (kvm_mips_guest_can_have_msa(&vcpu->arch))
		mask |= MIPS_CONF3_MSA;

	return mask;
}

static inline unsigned int kvm_vz_config4_user_wrmask(struct kvm_vcpu *vcpu)
{
	return kvm_vz_config4_guest_wrmask(vcpu) | MIPS_CONF_M;
}

static inline unsigned int kvm_vz_config5_user_wrmask(struct kvm_vcpu *vcpu)
{
	return kvm_vz_config5_guest_wrmask(vcpu);
}

static gpa_t kvm_vz_gva_to_gpa_cb(gva_t gva)
{
	/* VZ guest has already converted gva to gpa */
	return gva;
}

static void kvm_vz_queue_irq(struct kvm_vcpu *vcpu, unsigned int priority)
{
	set_bit(priority, &vcpu->arch.pending_exceptions);
	clear_bit(priority, &vcpu->arch.pending_exceptions_clr);
}

static void kvm_vz_dequeue_irq(struct kvm_vcpu *vcpu, unsigned int priority)
{
	clear_bit(priority, &vcpu->arch.pending_exceptions);
	set_bit(priority, &vcpu->arch.pending_exceptions_clr);
}

static void kvm_vz_queue_timer_int_cb(struct kvm_vcpu *vcpu)
{
	/*
	 * timer expiry is asynchronous to vcpu execution therefore defer guest
	 * cp0 accesses
	 */
	kvm_vz_queue_irq(vcpu, MIPS_EXC_INT_TIMER);
}

static void kvm_vz_dequeue_timer_int_cb(struct kvm_vcpu *vcpu)
{
	/*
	 * timer expiry is asynchronous to vcpu execution therefore defer guest
	 * cp0 accesses
	 */
	kvm_vz_dequeue_irq(vcpu, MIPS_EXC_INT_TIMER);
}

static void kvm_vz_queue_io_int_cb(struct kvm_vcpu *vcpu,
				   struct kvm_mips_interrupt *irq)
{
	int intr = (int)irq->irq;

	/*
	 * interrupts are asynchronous to vcpu execution therefore defer guest
	 * cp0 accesses
	 */
	switch (intr) {
	case 2:
		kvm_vz_queue_irq(vcpu, MIPS_EXC_INT_IO);
		break;

	case 3:
		kvm_vz_queue_irq(vcpu, MIPS_EXC_INT_IPI_1);
		break;

	case 4:
		kvm_vz_queue_irq(vcpu, MIPS_EXC_INT_IPI_2);
		break;

	default:
		break;
	}
}

static void kvm_vz_dequeue_io_int_cb(struct kvm_vcpu *vcpu,
				     struct kvm_mips_interrupt *irq)
{
	int intr = (int)irq->irq;

	/*
	 * interrupts are asynchronous to vcpu execution therefore defer guest
	 * cp0 accesses
	 */
	switch (intr) {
	case -2:
		kvm_vz_dequeue_irq(vcpu, MIPS_EXC_INT_IO);
		break;

	case -3:
		kvm_vz_dequeue_irq(vcpu, MIPS_EXC_INT_IPI_1);
		break;

	case -4:
		kvm_vz_dequeue_irq(vcpu, MIPS_EXC_INT_IPI_2);
		break;

	default:
		break;
	}
}

static u32 kvm_vz_priority_to_irq[MIPS_EXC_MAX] = {
	[MIPS_EXC_INT_TIMER] = C_IRQ5,
	[MIPS_EXC_INT_IO]    = C_IRQ0,
	[MIPS_EXC_INT_IPI_1] = C_IRQ1,
	[MIPS_EXC_INT_IPI_2] = C_IRQ2,
};

static int kvm_vz_irq_deliver_cb(struct kvm_vcpu *vcpu, unsigned int priority,
				 u32 cause)
{
	u32 irq = (priority < MIPS_EXC_MAX) ?
		kvm_vz_priority_to_irq[priority] : 0;

	switch (priority) {
	case MIPS_EXC_INT_TIMER:
		set_gc0_cause(C_TI);
		break;

	case MIPS_EXC_INT_IO:
	case MIPS_EXC_INT_IPI_1:
	case MIPS_EXC_INT_IPI_2:
		if (cpu_has_guestctl2)
			set_c0_guestctl2(irq);
		else
			set_gc0_cause(irq);
		break;

	default:
		break;
	}

	clear_bit(priority, &vcpu->arch.pending_exceptions);
	return 1;
}

static int kvm_vz_irq_clear_cb(struct kvm_vcpu *vcpu, unsigned int priority,
			       u32 cause)
{
	u32 irq = (priority < MIPS_EXC_MAX) ?
		kvm_vz_priority_to_irq[priority] : 0;
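	/*
	 * The irq values above are Cause.IP bits (C_IRQ0..C_IRQ5, i.e. bits
	 * 10..15), so the (irq << 14) tests below land on bits 24..29;
	 * judging from the surrounding comments these are the GuestCtl2
	 * Hardware Clear bits paired with the VIP bits 14 positions lower.
	 */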
	switch (priority) {
	case MIPS_EXC_INT_TIMER:
		/*
		 * Call to kvm_write_c0_guest_compare() clears Cause.TI in
		 * kvm_mips_emulate_CP0(). Explicitly clear irq associated with
		 * Cause.IP[IPTI] if GuestCtl2 virtual interrupt register not
		 * supported or if not using GuestCtl2 Hardware Clear.
		 */
		if (cpu_has_guestctl2) {
			if (!(read_c0_guestctl2() & (irq << 14)))
				clear_c0_guestctl2(irq);
		} else {
			clear_gc0_cause(irq);
		}
		break;

	case MIPS_EXC_INT_IO:
	case MIPS_EXC_INT_IPI_1:
	case MIPS_EXC_INT_IPI_2:
		/* Clear GuestCtl2.VIP irq if not using Hardware Clear */
		if (cpu_has_guestctl2) {
			if (!(read_c0_guestctl2() & (irq << 14)))
				clear_c0_guestctl2(irq);
		} else {
			clear_gc0_cause(irq);
		}
		break;

	default:
		break;
	}

	clear_bit(priority, &vcpu->arch.pending_exceptions_clr);
	return 1;
}

/*
 * VZ guest timer handling.
 */

/**
 * _kvm_vz_restore_stimer() - Restore soft timer state.
 * @vcpu:	Virtual CPU.
 * @compare:	CP0_Compare register value, restored by caller.
 * @cause:	CP0_Cause register to restore.
 *
 * Restore VZ state relating to the soft timer.
 */
static void _kvm_vz_restore_stimer(struct kvm_vcpu *vcpu, u32 compare,
				   u32 cause)
{
	/*
	 * Avoid spurious counter interrupts by setting Guest CP0_Count to just
	 * after Guest CP0_Compare.
	 */
	write_c0_gtoffset(compare - read_c0_count());
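	/*
	 * Guest CP0_Count reads as Root CP0_Count + CP0_GTOffset, so the
	 * write above makes the guest Count equal to Compare at this instant
	 * and tick onwards from there, giving the guest a full Count cycle
	 * before its next timer interrupt rather than an immediate one.
	 */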
	back_to_back_c0_hazard();
	write_gc0_cause(cause);
}

/**
 * kvm_vz_restore_timer() - Restore guest timer state.
 * @vcpu:	Virtual CPU.
 *
 * Restore soft timer state from saved context.
 */
static void kvm_vz_restore_timer(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	u32 cause, compare;

	compare = kvm_read_sw_gc0_compare(cop0);
	cause = kvm_read_sw_gc0_cause(cop0);

	write_gc0_compare(compare);
	_kvm_vz_restore_stimer(vcpu, compare, cause);
}

/**
 * kvm_vz_save_timer() - Save guest timer state.
 * @vcpu:	Virtual CPU.
 *
 * Save VZ guest timer state.
 */
static void kvm_vz_save_timer(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	u32 compare, cause;

	compare = read_gc0_compare();
	cause = read_gc0_cause();

	/* save timer-related state to VCPU context */
	kvm_write_sw_gc0_cause(cop0, cause);
	kvm_write_sw_gc0_compare(cop0, compare);
}

/**
 * is_eva_access() - Find whether an instruction is an EVA memory accessor.
 * @inst:	32-bit instruction encoding.
 *
 * Finds whether @inst encodes an EVA memory access instruction, which would
 * indicate that emulation of it should access the user mode address space
 * instead of the kernel mode address space. This matters for MUSUK segments
 * which are TLB mapped for user mode but unmapped for kernel mode.
 *
 * Returns:	Whether @inst encodes an EVA accessor instruction.
 */
static bool is_eva_access(union mips_instruction inst)
{
	if (inst.spec3_format.opcode != spec3_op)
		return false;

	switch (inst.spec3_format.func) {
	case lwle_op:
	case lwre_op:
	case cachee_op:
	case sbe_op:
	case she_op:
	case sce_op:
	case swe_op:
	case swle_op:
	case swre_op:
	case prefe_op:
	case lbue_op:
	case lhue_op:
	case lbe_op:
	case lhe_op:
	case lle_op:
	case lwe_op:
		return true;
	default:
		return false;
	}
}

/**
 * is_eva_am_mapped() - Find whether an access mode is mapped.
 * @vcpu:	KVM VCPU state.
 * @am:		3-bit encoded access mode.
 * @eu:		Segment becomes unmapped and uncached when Status.ERL=1.
 *
 * Decode @am to find whether it encodes a mapped segment for the current VCPU
 * state. Where necessary @eu and the actual instruction causing the fault are
 * taken into account to make the decision.
 *
 * Returns:	Whether the VCPU faulted on a TLB mapped address.
 */
static bool is_eva_am_mapped(struct kvm_vcpu *vcpu, unsigned int am, bool eu)
{
	u32 am_lookup;
	int err;

	/*
	 * Interpret access control mode. We assume address errors will already
	 * have been caught by the guest, leaving us with:
	 *       AM       UM  SM  KM  31..24 23..16
	 * UK     0 000           Unm   0      0
	 * MK     1 001           TLB   1
	 * MSK    2 010       TLB TLB   1
	 * MUSK   3 011   TLB TLB TLB   1
	 * MUSUK  4 100   TLB TLB Unm   0      1
	 * USK    5 101       Unm Unm   0      0
	 * -      6 110                 0      0
	 * UUSK   7 111   Unm Unm Unm   0      0
	 *
	 * We shift a magic value by AM across the sign bit to find if always
	 * TLB mapped, and if not shift by 8 again to find if it depends on KM.
	 */
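	/*
	 * Worked example: 0x70080000 has bits 30..28 set (AM 1..3 always
	 * mapped) and bit 19 set (AM 4 mapped outside kernel mode). For
	 * AM=4 (MUSUK): 0x70080000 << 4 = 0x00800000, sign bit clear, so not
	 * always mapped; shifting a further 8 gives 0x80000000, sign bit
	 * set, so the result depends on the kernel-mode check below.
	 */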
	am_lookup = 0x70080000 << am;
	if ((s32)am_lookup < 0) {
		/*
		 * MK, MSK, MUSK
		 * Always TLB mapped, unless SegCtl.EU && ERL
		 */
		if (!eu || !(read_gc0_status() & ST0_ERL))
			return true;
	} else {
		am_lookup <<= 8;
		if ((s32)am_lookup < 0) {
			union mips_instruction inst;
			unsigned int status;
			u32 *opc;

			/*
			 * MUSUK
			 * TLB mapped if not in kernel mode
			 */
			status = read_gc0_status();
			if (!(status & (ST0_EXL | ST0_ERL)) &&
			    (status & ST0_KSU))
				return true;
			/*
			 * EVA access instructions in kernel
			 * mode access user address space.
			 */
			opc = (u32 *)vcpu->arch.pc;
			if (vcpu->arch.host_cp0_cause & CAUSEF_BD)
				opc += 1;
			err = kvm_get_badinstr(opc, vcpu, &inst.word);
			if (!err && is_eva_access(inst))
				return true;
		}
	}

	return false;
}

/**
 * kvm_vz_gva_to_gpa() - Convert valid GVA to GPA.
 * @vcpu:	KVM VCPU state.
 * @gva:	Guest virtual address to convert.
 * @gpa:	Output guest physical address.
 *
 * Convert a guest virtual address (GVA) which is valid according to the guest
 * context, to a guest physical address (GPA).
 *
 * Returns:	0 on success.
 *		-errno on failure.
 */
static int kvm_vz_gva_to_gpa(struct kvm_vcpu *vcpu, unsigned long gva,
			     unsigned long *gpa)
{
	u32 gva32 = gva;
	unsigned long segctl;

	if ((long)gva == (s32)gva32) {
		/* Handle canonical 32-bit virtual address */
		if (cpu_guest_has_segments) {
			unsigned long mask, pa;
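			/*
			 * Each 32-bit SegCtl register packs two 16-bit CFG
			 * fields (the odd CFG in the upper half, hence the
			 * >> 16 cases below). Within a CFG, as used after
			 * this switch: PA is bits 15:9 (physical address
			 * bits 35:29 of the segment), AM is bits 6:4 and EU
			 * is bit 3.
			 */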
			switch (gva32 >> 29) {
			case 0:
			case 1: /* CFG5 (1GB) */
				segctl = read_gc0_segctl2() >> 16;
				mask = (unsigned long)0xfc0000000ull;
				break;
			case 2:
			case 3: /* CFG4 (1GB) */
				segctl = read_gc0_segctl2();
				mask = (unsigned long)0xfc0000000ull;
				break;
			case 4: /* CFG3 (512MB) */
				segctl = read_gc0_segctl1() >> 16;
				mask = (unsigned long)0xfe0000000ull;
				break;
			case 5: /* CFG2 (512MB) */
				segctl = read_gc0_segctl1();
				mask = (unsigned long)0xfe0000000ull;
				break;
			case 6: /* CFG1 (512MB) */
				segctl = read_gc0_segctl0() >> 16;
				mask = (unsigned long)0xfe0000000ull;
				break;
			case 7: /* CFG0 (512MB) */
				segctl = read_gc0_segctl0();
				mask = (unsigned long)0xfe0000000ull;
				break;
			default:
				/*
				 * GCC 4.9 isn't smart enough to figure out
				 * that segctl and mask are always initialised.
				 */
				unreachable();
			}

			if (is_eva_am_mapped(vcpu, (segctl >> 4) & 0x7,
					     segctl & 0x0008))
				goto tlb_mapped;

			/* Unmapped, find guest physical address */
			pa = (segctl << 20) & mask;
			pa |= gva32 & ~mask;
			*gpa = pa;
			return 0;
		} else if ((s32)gva32 < (s32)0xc0000000) {
			/* legacy unmapped KSeg0 or KSeg1 */
			*gpa = gva32 & 0x1fffffff;
			return 0;
		}
#ifdef CONFIG_64BIT
	} else if ((gva & 0xc000000000000000) == 0x8000000000000000) {
		/* XKPHYS */
		if (cpu_guest_has_segments) {
			/*
			 * Each of the 8 regions can be overridden by SegCtl2.XR
			 * to use SegCtl1.XAM.
			 */
			segctl = read_gc0_segctl2();
			if (segctl & (1ull << (56 + ((gva >> 59) & 0x7)))) {
				segctl = read_gc0_segctl1();
				if (is_eva_am_mapped(vcpu, (segctl >> 59) & 0x7,
						     0))
					goto tlb_mapped;
			}
		}
		/*
		 * Traditionally fully unmapped.
		 * Bits 61:59 specify the CCA, which we can just mask off here.
		 * Bits 58:PABITS should be zero, but we shouldn't have got here
		 * if it wasn't.
		 */
		*gpa = gva & 0x07ffffffffffffff;
		return 0;
#endif
	}

tlb_mapped:
	return kvm_vz_guest_tlb_lookup(vcpu, gva, gpa);
}

/**
 * kvm_vz_badvaddr_to_gpa() - Convert GVA BadVAddr from root exception to GPA.
 * @vcpu:	KVM VCPU state.
 * @badvaddr:	Root BadVAddr.
 * @gpa:	Output guest physical address.
 *
 * VZ implementations are permitted to report guest virtual addresses (GVA) in
 * BadVAddr on a root exception during guest execution, instead of the more
 * convenient guest physical addresses (GPA). When we get a GVA, this function
 * converts it to a GPA, taking into account guest segmentation and guest TLB
 * state.
 *
 * Returns:	0 on success.
 *		-errno on failure.
 */
static int kvm_vz_badvaddr_to_gpa(struct kvm_vcpu *vcpu, unsigned long badvaddr,
				  unsigned long *gpa)
{
	unsigned int gexccode = (vcpu->arch.host_cp0_guestctl0 &
				 MIPS_GCTL0_GEXC) >> MIPS_GCTL0_GEXC_SHIFT;

	/* If BadVAddr is GPA, then all is well in the world */
	if (likely(gexccode == MIPS_GCTL0_GEXC_GPA)) {
		*gpa = badvaddr;
		return 0;
	}

	/* Otherwise we'd expect it to be GVA ... */
	if (WARN(gexccode != MIPS_GCTL0_GEXC_GVA,
		 "Unexpected gexccode %#x\n", gexccode))
		return -EINVAL;

	/* ... and we need to perform the GVA->GPA translation in software */
	return kvm_vz_gva_to_gpa(vcpu, badvaddr, gpa);
}

static int kvm_trap_vz_no_handler(struct kvm_vcpu *vcpu)
{
	u32 *opc = (u32 *) vcpu->arch.pc;
	u32 cause = vcpu->arch.host_cp0_cause;
	u32 exccode = (cause & CAUSEF_EXCCODE) >> CAUSEB_EXCCODE;
	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
	u32 inst = 0;

	/*
	 * Fetch the instruction.
	 */
	if (cause & CAUSEF_BD)
		opc += 1;
	kvm_get_badinstr(opc, vcpu, &inst);

	kvm_err("Exception Code: %d not handled @ PC: %p, inst: 0x%08x BadVaddr: %#lx Status: %#x\n",
		exccode, opc, inst, badvaddr,
		read_gc0_status());
	kvm_arch_vcpu_dump_regs(vcpu);
	vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
	return RESUME_HOST;
}
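/*
 * Guest Privileged Sensitive Instruction (GPSI) exits: the CP0 accesses
 * emulated below are the few the hardware does not virtualize for us,
 * e.g. Count/Compare (which go through KVM's timer emulation) and the
 * identification registers kept in the software cop0 context.
 */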
static enum emulation_result kvm_vz_gpsi_cop0(union mips_instruction inst,
					      u32 *opc, u32 cause,
					      struct kvm_run *run,
					      struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	enum emulation_result er = EMULATE_DONE;
	u32 rt, rd, sel;
	unsigned long curr_pc;
	unsigned long val;

	/*
	 * Update PC and hold onto current PC in case there is
	 * an error and we want to rollback the PC
	 */
	curr_pc = vcpu->arch.pc;
	er = update_pc(vcpu, cause);
	if (er == EMULATE_FAIL)
		return er;

	if (inst.co_format.co) {
		switch (inst.co_format.func) {
		case wait_op:
			er = kvm_mips_emul_wait(vcpu);
			break;
		default:
			er = EMULATE_FAIL;
		}
	} else {
		rt = inst.c0r_format.rt;
		rd = inst.c0r_format.rd;
		sel = inst.c0r_format.sel;

		switch (inst.c0r_format.rs) {
		case dmfc_op:
		case mfc_op:
#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
			cop0->stat[rd][sel]++;
#endif
			if (rd == MIPS_CP0_COUNT &&
			    sel == 0) {			/* Count */
				val = kvm_mips_read_count(vcpu);
			} else if (rd == MIPS_CP0_COMPARE &&
				   sel == 0) {		/* Compare */
				val = read_gc0_compare();
			} else if ((rd == MIPS_CP0_PRID &&
				    (sel == 0 ||	/* PRid */
				     sel == 2 ||	/* CDMMBase */
				     sel == 3)) ||	/* CMGCRBase */
				   (rd == MIPS_CP0_STATUS &&
				    (sel == 2 ||	/* SRSCtl */
				     sel == 3)) ||	/* SRSMap */
				   (rd == MIPS_CP0_CONFIG &&
				    (sel == 7)) ||	/* Config7 */
				   (rd == MIPS_CP0_ERRCTL &&
				    (sel == 0))) {	/* ErrCtl */
				val = cop0->reg[rd][sel];
			} else {
				val = 0;
				er = EMULATE_FAIL;
			}

			if (er != EMULATE_FAIL) {
				/* Sign extend */
				if (inst.c0r_format.rs == mfc_op)
					val = (int)val;
				vcpu->arch.gprs[rt] = val;
			}

			trace_kvm_hwr(vcpu, (inst.c0r_format.rs == mfc_op) ?
					KVM_TRACE_MFC0 : KVM_TRACE_DMFC0,
				      KVM_TRACE_COP0(rd, sel), val);
			break;

		case dmtc_op:
		case mtc_op:
#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
			cop0->stat[rd][sel]++;
#endif
			val = vcpu->arch.gprs[rt];
			trace_kvm_hwr(vcpu, (inst.c0r_format.rs == mtc_op) ?
					KVM_TRACE_MTC0 : KVM_TRACE_DMTC0,
				      KVM_TRACE_COP0(rd, sel), val);

			if (rd == MIPS_CP0_COUNT &&
			    sel == 0) {			/* Count */
				kvm_mips_write_count(vcpu, vcpu->arch.gprs[rt]);
			} else if (rd == MIPS_CP0_COMPARE &&
				   sel == 0) {		/* Compare */
				kvm_mips_write_compare(vcpu,
						       vcpu->arch.gprs[rt],
						       true);
			} else if (rd == MIPS_CP0_ERRCTL &&
				   (sel == 0)) {	/* ErrCtl */
				/* ignore the written value */
			} else {
				er = EMULATE_FAIL;
			}
			break;

		default:
			er = EMULATE_FAIL;
			break;
		}
	}

	/* Rollback PC only if emulation was unsuccessful */
	if (er == EMULATE_FAIL) {
		kvm_err("[%#lx]%s: unsupported cop0 instruction 0x%08x\n",
			curr_pc, __func__, inst.word);

		vcpu->arch.pc = curr_pc;
	}

	return er;
}

static enum emulation_result kvm_vz_gpsi_cache(union mips_instruction inst,
					       u32 *opc, u32 cause,
					       struct kvm_run *run,
					       struct kvm_vcpu *vcpu)
{
	enum emulation_result er = EMULATE_DONE;
	u32 cache, op_inst, op, base;
	s16 offset;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	unsigned long va, curr_pc;

	/*
	 * Update PC and hold onto current PC in case there is
	 * an error and we want to rollback the PC
	 */
	curr_pc = vcpu->arch.pc;
	er = update_pc(vcpu, cause);
	if (er == EMULATE_FAIL)
		return er;

	base = inst.i_format.rs;
	op_inst = inst.i_format.rt;
	if (cpu_has_mips_r6)
		offset = inst.spec3_format.simmediate;
	else
		offset = inst.i_format.simmediate;
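	/*
	 * MIPS R6 moved CACHE from its own major opcode (16-bit offset) to a
	 * SPEC3 encoding with a narrower signed immediate, hence the two
	 * decodings above.
	 */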
	cache = op_inst & CacheOp_Cache;
	op = op_inst & CacheOp_Op;

	va = arch->gprs[base] + offset;

	kvm_debug("CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
		  cache, op, base, arch->gprs[base], offset);

	/* Secondary or tertiary cache ops ignored */
	if (cache != Cache_I && cache != Cache_D)
		return EMULATE_DONE;

	switch (op_inst) {
	case Index_Invalidate_I:
		flush_icache_line_indexed(va);
		return EMULATE_DONE;
	case Index_Writeback_Inv_D:
		flush_dcache_line_indexed(va);
		return EMULATE_DONE;
	default:
		break;
	}

	kvm_err("@ %#lx/%#lx CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
		curr_pc, vcpu->arch.gprs[31], cache, op, base, arch->gprs[base],
		offset);
	/* Rollback PC */
	vcpu->arch.pc = curr_pc;

	return EMULATE_FAIL;
}

static enum emulation_result kvm_trap_vz_handle_gpsi(u32 cause, u32 *opc,
						     struct kvm_vcpu *vcpu)
{
	enum emulation_result er = EMULATE_DONE;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	struct kvm_run *run = vcpu->run;
	union mips_instruction inst;
	int rd, rt, sel;
	int err;

	/*
	 * Fetch the instruction.
	 */
	if (cause & CAUSEF_BD)
		opc += 1;
	err = kvm_get_badinstr(opc, vcpu, &inst.word);
	if (err)
		return EMULATE_FAIL;

	switch (inst.r_format.opcode) {
	case cop0_op:
		er = kvm_vz_gpsi_cop0(inst, opc, cause, run, vcpu);
		break;
#ifndef CONFIG_CPU_MIPSR6
	case cache_op:
		trace_kvm_exit(vcpu, KVM_TRACE_EXIT_CACHE);
		er = kvm_vz_gpsi_cache(inst, opc, cause, run, vcpu);
		break;
#endif
	case spec3_op:
		switch (inst.spec3_format.func) {
#ifdef CONFIG_CPU_MIPSR6
		case cache6_op:
			trace_kvm_exit(vcpu, KVM_TRACE_EXIT_CACHE);
			er = kvm_vz_gpsi_cache(inst, opc, cause, run, vcpu);
			break;
#endif
		case rdhwr_op:
			if (inst.r_format.rs || (inst.r_format.re >> 3))
				goto unknown;

			rd = inst.r_format.rd;
			rt = inst.r_format.rt;
			sel = inst.r_format.re & 0x7;

			switch (rd) {
			case MIPS_HWR_CC:	/* Read count register */
				arch->gprs[rt] =
					(long)(int)kvm_mips_read_count(vcpu);
				break;
			default:
				trace_kvm_hwr(vcpu, KVM_TRACE_RDHWR,
					      KVM_TRACE_HWR(rd, sel), 0);
				goto unknown;
			}

			trace_kvm_hwr(vcpu, KVM_TRACE_RDHWR,
				      KVM_TRACE_HWR(rd, sel), arch->gprs[rt]);

			er = update_pc(vcpu, cause);
			break;
		default:
			goto unknown;
		}
		break;
unknown:

	default:
		kvm_err("GPSI exception not supported (%p/%#x)\n",
			opc, inst.word);
		kvm_arch_vcpu_dump_regs(vcpu);
		er = EMULATE_FAIL;
		break;
	}

	return er;
}
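/*
 * Guest Software Field Change (GSFC) exits: the guest wrote a CP0 field the
 * hardware cannot let it change silently (Status, Cause, IntCtl and Config5
 * here), so we complete the write on its behalf with appropriate mediation
 * and then step the guest past the mtc0.
 */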
static enum emulation_result kvm_trap_vz_handle_gsfc(u32 cause, u32 *opc,
						     struct kvm_vcpu *vcpu)
{
	enum emulation_result er = EMULATE_DONE;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	union mips_instruction inst;
	int err;

	/*
	 * Fetch the instruction.
	 */
	if (cause & CAUSEF_BD)
		opc += 1;
	err = kvm_get_badinstr(opc, vcpu, &inst.word);
	if (err)
		return EMULATE_FAIL;

	/* complete MTC0 on behalf of guest and advance EPC */
	if (inst.c0r_format.opcode == cop0_op &&
	    inst.c0r_format.rs == mtc_op &&
	    inst.c0r_format.z == 0) {
		int rt = inst.c0r_format.rt;
		int rd = inst.c0r_format.rd;
		int sel = inst.c0r_format.sel;
		unsigned int val = arch->gprs[rt];
		unsigned int old_val, change;

		trace_kvm_hwr(vcpu, KVM_TRACE_MTC0, KVM_TRACE_COP0(rd, sel),
			      val);

		if ((rd == MIPS_CP0_STATUS) && (sel == 0)) {
			/* FR bit should read as zero if no FPU */
			if (!kvm_mips_guest_has_fpu(&vcpu->arch))
				val &= ~(ST0_CU1 | ST0_FR);

			/*
			 * Also don't allow FR to be set if host doesn't support
			 * it.
			 */
			if (!(boot_cpu_data.fpu_id & MIPS_FPIR_F64))
				val &= ~ST0_FR;

			old_val = read_gc0_status();
			change = val ^ old_val;

			if (change & ST0_FR) {
				/*
				 * FPU and Vector register state is made
				 * UNPREDICTABLE by a change of FR, so don't
				 * even bother saving it.
				 */
				kvm_drop_fpu(vcpu);
			}

			/*
			 * If MSA state is already live, it is undefined how it
			 * interacts with FR=0 FPU state, and we don't want to
			 * hit reserved instruction exceptions trying to save
			 * the MSA state later when CU=1 && FR=1, so play it
			 * safe and save it first.
			 */
			if (change & ST0_CU1 && !(val & ST0_FR) &&
			    vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA)
				kvm_lose_fpu(vcpu);

			write_gc0_status(val);
		} else if ((rd == MIPS_CP0_CAUSE) && (sel == 0)) {
			u32 old_cause = read_gc0_cause();
			u32 change = old_cause ^ val;

			/* DC bit enabling/disabling timer? */
			if (change & CAUSEF_DC) {
				if (val & CAUSEF_DC)
					kvm_mips_count_disable_cause(vcpu);
				else
					kvm_mips_count_enable_cause(vcpu);
			}

			/* Only certain bits are RW to the guest */
			change &= (CAUSEF_DC | CAUSEF_IV | CAUSEF_WP |
				   CAUSEF_IP0 | CAUSEF_IP1);

			/* WP can only be cleared */
			change &= ~CAUSEF_WP | old_cause;
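			/*
			 * That is: keep the WP bit in the change mask only if
			 * WP was already set in old_cause, so the guest may
			 * clear Cause.WP but any attempt to set it is dropped.
			 */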
			write_gc0_cause(old_cause ^ change);
		} else if ((rd == MIPS_CP0_STATUS) && (sel == 1)) { /* IntCtl */
			write_gc0_intctl(val);
		} else if ((rd == MIPS_CP0_CONFIG) && (sel == 5)) {
			old_val = read_gc0_config5();
			change = val ^ old_val;
			/* Handle changes in FPU/MSA modes */
			preempt_disable();

			/*
			 * Propagate FRE changes immediately if the FPU
			 * context is already loaded.
			 */
			if (change & MIPS_CONF5_FRE &&
			    vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU)
				change_c0_config5(MIPS_CONF5_FRE, val);

			preempt_enable();

			val = old_val ^
				(change & kvm_vz_config5_guest_wrmask(vcpu));
			write_gc0_config5(val);
		} else {
			kvm_err("Handle GSFC, unsupported field change @ %p: %#x\n",
				opc, inst.word);
			er = EMULATE_FAIL;
		}

		if (er != EMULATE_FAIL)
			er = update_pc(vcpu, cause);
	} else {
		kvm_err("Handle GSFC, unrecognized instruction @ %p: %#x\n",
			opc, inst.word);
		er = EMULATE_FAIL;
	}

	return er;
}

static enum emulation_result kvm_trap_vz_handle_hc(u32 cause, u32 *opc,
						   struct kvm_vcpu *vcpu)
{
	enum emulation_result er;
	union mips_instruction inst;
	unsigned long curr_pc;
	int err;

	if (cause & CAUSEF_BD)
		opc += 1;
	err = kvm_get_badinstr(opc, vcpu, &inst.word);
	if (err)
		return EMULATE_FAIL;

	/*
	 * Update PC and hold onto current PC in case there is
	 * an error and we want to rollback the PC
	 */
	curr_pc = vcpu->arch.pc;
	er = update_pc(vcpu, cause);
	if (er == EMULATE_FAIL)
		return er;

	er = kvm_mips_emul_hypcall(vcpu, inst);
	if (er == EMULATE_FAIL)
		vcpu->arch.pc = curr_pc;

	return er;
}

static enum emulation_result kvm_trap_vz_no_handler_guest_exit(u32 gexccode,
							       u32 cause,
							       u32 *opc,
							       struct kvm_vcpu *vcpu)
{
	u32 inst;

	/*
	 * Fetch the instruction.
	 */
	if (cause & CAUSEF_BD)
		opc += 1;
	kvm_get_badinstr(opc, vcpu, &inst);

	kvm_err("Guest Exception Code: %d not yet handled @ PC: %p, inst: 0x%08x Status: %#x\n",
		gexccode, opc, inst, read_gc0_status());

	return EMULATE_FAIL;
}

static int kvm_trap_vz_handle_guest_exit(struct kvm_vcpu *vcpu)
{
	u32 *opc = (u32 *) vcpu->arch.pc;
	u32 cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	u32 gexccode = (vcpu->arch.host_cp0_guestctl0 &
			MIPS_GCTL0_GEXC) >> MIPS_GCTL0_GEXC_SHIFT;
	int ret = RESUME_GUEST;

	trace_kvm_exit(vcpu, KVM_TRACE_EXIT_GEXCCODE_BASE + gexccode);
	switch (gexccode) {
	case MIPS_GCTL0_GEXC_GPSI:
		++vcpu->stat.vz_gpsi_exits;
		er = kvm_trap_vz_handle_gpsi(cause, opc, vcpu);
		break;
	case MIPS_GCTL0_GEXC_GSFC:
		++vcpu->stat.vz_gsfc_exits;
		er = kvm_trap_vz_handle_gsfc(cause, opc, vcpu);
		break;
	case MIPS_GCTL0_GEXC_HC:
		++vcpu->stat.vz_hc_exits;
		er = kvm_trap_vz_handle_hc(cause, opc, vcpu);
		break;
	case MIPS_GCTL0_GEXC_GRR:
		++vcpu->stat.vz_grr_exits;
		er = kvm_trap_vz_no_handler_guest_exit(gexccode, cause, opc,
						       vcpu);
		break;
	case MIPS_GCTL0_GEXC_GVA:
		++vcpu->stat.vz_gva_exits;
		er = kvm_trap_vz_no_handler_guest_exit(gexccode, cause, opc,
						       vcpu);
		break;
	case MIPS_GCTL0_GEXC_GHFC:
		++vcpu->stat.vz_ghfc_exits;
		er = kvm_trap_vz_no_handler_guest_exit(gexccode, cause, opc,
						       vcpu);
		break;
	case MIPS_GCTL0_GEXC_GPA:
		++vcpu->stat.vz_gpa_exits;
		er = kvm_trap_vz_no_handler_guest_exit(gexccode, cause, opc,
						       vcpu);
		break;
	default:
		++vcpu->stat.vz_resvd_exits;
		er = kvm_trap_vz_no_handler_guest_exit(gexccode, cause, opc,
						       vcpu);
		break;
	}

	if (er == EMULATE_DONE) {
		ret = RESUME_GUEST;
	} else if (er == EMULATE_HYPERCALL) {
		ret = kvm_mips_handle_hypcall(vcpu);
	} else {
		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}
/**
 * kvm_trap_vz_handle_cop_unusable() - Guest used unusable coprocessor.
 * @vcpu:	Virtual CPU context.
 *
 * Handle when the guest attempts to use a coprocessor which hasn't been
 * allowed by the root context.
 */
static int kvm_trap_vz_handle_cop_unusable(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	u32 cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_FAIL;
	int ret = RESUME_GUEST;

	if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 1) {
		/*
		 * If guest FPU not present, the FPU operation should have been
		 * treated as a reserved instruction!
		 * If FPU already in use, we shouldn't get this at all.
		 */
		if (WARN_ON(!kvm_mips_guest_has_fpu(&vcpu->arch) ||
			    vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU)) {
			preempt_enable();
			return EMULATE_FAIL;
		}

		kvm_own_fpu(vcpu);
		er = EMULATE_DONE;
	}
	/* other coprocessors not handled */

	switch (er) {
	case EMULATE_DONE:
		ret = RESUME_GUEST;
		break;

	case EMULATE_FAIL:
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
		break;

	default:
		BUG();
	}
	return ret;
}

/**
 * kvm_trap_vz_handle_msa_disabled() - Guest used MSA while disabled in root.
 * @vcpu:	Virtual CPU context.
 *
 * Handle when the guest attempts to use MSA when it is disabled in the root
 * context.
 */
static int kvm_trap_vz_handle_msa_disabled(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;

	/*
	 * If MSA not present or not exposed to guest or FR=0, the MSA operation
	 * should have been treated as a reserved instruction!
	 * Same if CU1=1, FR=0.
	 * If MSA already in use, we shouldn't get this at all.
	 */
	if (!kvm_mips_guest_has_msa(&vcpu->arch) ||
	    (read_gc0_status() & (ST0_CU1 | ST0_FR)) == ST0_CU1 ||
	    !(read_gc0_config5() & MIPS_CONF5_MSAEN) ||
	    vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) {
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		return RESUME_HOST;
	}

	kvm_own_msa(vcpu);

	return RESUME_GUEST;
}

static int kvm_trap_vz_handle_tlb_ld_miss(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	u32 *opc = (u32 *) vcpu->arch.pc;
	u32 cause = vcpu->arch.host_cp0_cause;
	ulong badvaddr = vcpu->arch.host_cp0_badvaddr;
	union mips_instruction inst;
	enum emulation_result er = EMULATE_DONE;
	int err, ret = RESUME_GUEST;

	if (kvm_mips_handle_vz_root_tlb_fault(badvaddr, vcpu, false)) {
		/* A code fetch fault doesn't count as an MMIO */
		if (kvm_is_ifetch_fault(&vcpu->arch)) {
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			return RESUME_HOST;
		}

		/* Fetch the instruction */
		if (cause & CAUSEF_BD)
			opc += 1;
		err = kvm_get_badinstr(opc, vcpu, &inst.word);
		if (err) {
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			return RESUME_HOST;
		}

		/* Treat as MMIO */
		er = kvm_mips_emulate_load(inst, cause, run, vcpu);
		if (er == EMULATE_FAIL) {
			kvm_err("Guest Emulate Load from MMIO space failed: PC: %p, BadVaddr: %#lx\n",
				opc, badvaddr);
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		}
	}

	if (er == EMULATE_DONE) {
		ret = RESUME_GUEST;
	} else if (er == EMULATE_DO_MMIO) {
		run->exit_reason = KVM_EXIT_MMIO;
		ret = RESUME_HOST;
	} else {
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}
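/*
 * Unlike the load path above, the store path below first runs BadVAddr
 * through kvm_vz_badvaddr_to_gpa(), since the root exception may have
 * reported a guest virtual address; if that translation fails the access is
 * simply retried.
 */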
static int kvm_trap_vz_handle_tlb_st_miss(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	u32 *opc = (u32 *) vcpu->arch.pc;
	u32 cause = vcpu->arch.host_cp0_cause;
	ulong badvaddr = vcpu->arch.host_cp0_badvaddr;
	union mips_instruction inst;
	enum emulation_result er = EMULATE_DONE;
	int err;
	int ret = RESUME_GUEST;

	/* Just try the access again if we couldn't do the translation */
	if (kvm_vz_badvaddr_to_gpa(vcpu, badvaddr, &badvaddr))
		return RESUME_GUEST;
	vcpu->arch.host_cp0_badvaddr = badvaddr;

	if (kvm_mips_handle_vz_root_tlb_fault(badvaddr, vcpu, true)) {
		/* Fetch the instruction */
		if (cause & CAUSEF_BD)
			opc += 1;
		err = kvm_get_badinstr(opc, vcpu, &inst.word);
		if (err) {
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			return RESUME_HOST;
		}

		/* Treat as MMIO */
		er = kvm_mips_emulate_store(inst, cause, run, vcpu);
		if (er == EMULATE_FAIL) {
			kvm_err("Guest Emulate Store to MMIO space failed: PC: %p, BadVaddr: %#lx\n",
				opc, badvaddr);
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		}
	}

	if (er == EMULATE_DONE) {
		ret = RESUME_GUEST;
	} else if (er == EMULATE_DO_MMIO) {
		run->exit_reason = KVM_EXIT_MMIO;
		ret = RESUME_HOST;
	} else {
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}

static u64 kvm_vz_get_one_regs[] = {
	KVM_REG_MIPS_CP0_INDEX,
	KVM_REG_MIPS_CP0_ENTRYLO0,
	KVM_REG_MIPS_CP0_ENTRYLO1,
	KVM_REG_MIPS_CP0_CONTEXT,
	KVM_REG_MIPS_CP0_PAGEMASK,
	KVM_REG_MIPS_CP0_PAGEGRAIN,
	KVM_REG_MIPS_CP0_WIRED,
	KVM_REG_MIPS_CP0_HWRENA,
	KVM_REG_MIPS_CP0_BADVADDR,
	KVM_REG_MIPS_CP0_COUNT,
	KVM_REG_MIPS_CP0_ENTRYHI,
	KVM_REG_MIPS_CP0_COMPARE,
	KVM_REG_MIPS_CP0_STATUS,
	KVM_REG_MIPS_CP0_INTCTL,
	KVM_REG_MIPS_CP0_CAUSE,
	KVM_REG_MIPS_CP0_EPC,
	KVM_REG_MIPS_CP0_PRID,
	KVM_REG_MIPS_CP0_EBASE,
	KVM_REG_MIPS_CP0_CONFIG,
	KVM_REG_MIPS_CP0_CONFIG1,
	KVM_REG_MIPS_CP0_CONFIG2,
	KVM_REG_MIPS_CP0_CONFIG3,
	KVM_REG_MIPS_CP0_CONFIG4,
	KVM_REG_MIPS_CP0_CONFIG5,
#ifdef CONFIG_64BIT
	KVM_REG_MIPS_CP0_XCONTEXT,
#endif
	KVM_REG_MIPS_CP0_ERROREPC,

	KVM_REG_MIPS_COUNT_CTL,
	KVM_REG_MIPS_COUNT_RESUME,
	KVM_REG_MIPS_COUNT_HZ,
};

static u64 kvm_vz_get_one_regs_contextconfig[] = {
	KVM_REG_MIPS_CP0_CONTEXTCONFIG,
#ifdef CONFIG_64BIT
	KVM_REG_MIPS_CP0_XCONTEXTCONFIG,
#endif
};

static u64 kvm_vz_get_one_regs_segments[] = {
	KVM_REG_MIPS_CP0_SEGCTL0,
	KVM_REG_MIPS_CP0_SEGCTL1,
	KVM_REG_MIPS_CP0_SEGCTL2,
};

static u64 kvm_vz_get_one_regs_kscratch[] = {
	KVM_REG_MIPS_CP0_KSCRATCH1,
	KVM_REG_MIPS_CP0_KSCRATCH2,
	KVM_REG_MIPS_CP0_KSCRATCH3,
	KVM_REG_MIPS_CP0_KSCRATCH4,
	KVM_REG_MIPS_CP0_KSCRATCH5,
	KVM_REG_MIPS_CP0_KSCRATCH6,
};

static unsigned long kvm_vz_num_regs(struct kvm_vcpu *vcpu)
{
	unsigned long ret;

	ret = ARRAY_SIZE(kvm_vz_get_one_regs);
	if (cpu_guest_has_userlocal)
		++ret;
	if (cpu_guest_has_badinstr)
		++ret;
	if (cpu_guest_has_badinstrp)
		++ret;
	if (cpu_guest_has_contextconfig)
		ret += ARRAY_SIZE(kvm_vz_get_one_regs_contextconfig);
	if (cpu_guest_has_segments)
		ret += ARRAY_SIZE(kvm_vz_get_one_regs_segments);
	ret += __arch_hweight8(cpu_data[0].guest.kscratch_mask);

	return ret;
}

static int kvm_vz_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices)
{
	u64 index;
	unsigned int i;

	if (copy_to_user(indices, kvm_vz_get_one_regs,
			 sizeof(kvm_vz_get_one_regs)))
		return -EFAULT;
	indices += ARRAY_SIZE(kvm_vz_get_one_regs);

	if (cpu_guest_has_userlocal) {
		index = KVM_REG_MIPS_CP0_USERLOCAL;
		if (copy_to_user(indices, &index, sizeof(index)))
			return -EFAULT;
		++indices;
	}
	if (cpu_guest_has_badinstr) {
		index = KVM_REG_MIPS_CP0_BADINSTR;
		if (copy_to_user(indices, &index, sizeof(index)))
			return -EFAULT;
		++indices;
	}
	if (cpu_guest_has_badinstrp) {
		index = KVM_REG_MIPS_CP0_BADINSTRP;
		if (copy_to_user(indices, &index, sizeof(index)))
			return -EFAULT;
		++indices;
	}
	if (cpu_guest_has_contextconfig) {
		if (copy_to_user(indices, kvm_vz_get_one_regs_contextconfig,
				 sizeof(kvm_vz_get_one_regs_contextconfig)))
			return -EFAULT;
		indices += ARRAY_SIZE(kvm_vz_get_one_regs_contextconfig);
	}
	if (cpu_guest_has_segments) {
		if (copy_to_user(indices, kvm_vz_get_one_regs_segments,
				 sizeof(kvm_vz_get_one_regs_segments)))
			return -EFAULT;
		indices += ARRAY_SIZE(kvm_vz_get_one_regs_segments);
	}
	for (i = 0; i < 6; ++i) {
		if (!cpu_guest_has_kscr(i + 2))
			continue;

		if (copy_to_user(indices, &kvm_vz_get_one_regs_kscratch[i],
				 sizeof(kvm_vz_get_one_regs_kscratch[i])))
			return -EFAULT;
		++indices;
	}

	return 0;
}
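/*
 * On a 32-bit kernel EntryLo is a 32-bit register with the RI and XI bits at
 * the top (bits 31:30), while the KVM register API always models EntryLo as
 * 64 bits with RI/XI at bits 63:62. The two helpers below shuffle just those
 * bits between the layouts; on 64-bit kernels they are no-ops.
 */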
static inline s64 entrylo_kvm_to_user(unsigned long v)
{
	s64 mask, ret = v;

	if (BITS_PER_LONG == 32) {
		/*
		 * KVM API exposes 64-bit version of the register, so move the
		 * RI/XI bits up into place.
		 */
		mask = MIPS_ENTRYLO_RI | MIPS_ENTRYLO_XI;
		ret &= ~mask;
		ret |= ((s64)v & mask) << 32;
	}
	return ret;
}

static inline unsigned long entrylo_user_to_kvm(s64 v)
{
	unsigned long mask, ret = v;

	if (BITS_PER_LONG == 32) {
		/*
		 * KVM API exposes 64-bit version of the register, so move the
		 * RI/XI bits down into place.
		 */
		mask = MIPS_ENTRYLO_RI | MIPS_ENTRYLO_XI;
		ret &= ~mask;
		ret |= (v >> 32) & mask;
	}
	return ret;
}

static int kvm_vz_get_one_reg(struct kvm_vcpu *vcpu,
			      const struct kvm_one_reg *reg,
			      s64 *v)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	unsigned int idx;

	switch (reg->id) {
	case KVM_REG_MIPS_CP0_INDEX:
		*v = (long)read_gc0_index();
		break;
	case KVM_REG_MIPS_CP0_ENTRYLO0:
		*v = entrylo_kvm_to_user(read_gc0_entrylo0());
		break;
	case KVM_REG_MIPS_CP0_ENTRYLO1:
		*v = entrylo_kvm_to_user(read_gc0_entrylo1());
		break;
	case KVM_REG_MIPS_CP0_CONTEXT:
		*v = (long)read_gc0_context();
		break;
	case KVM_REG_MIPS_CP0_CONTEXTCONFIG:
		if (!cpu_guest_has_contextconfig)
			return -EINVAL;
		*v = read_gc0_contextconfig();
		break;
	case KVM_REG_MIPS_CP0_USERLOCAL:
		if (!cpu_guest_has_userlocal)
			return -EINVAL;
		*v = read_gc0_userlocal();
		break;
#ifdef CONFIG_64BIT
	case KVM_REG_MIPS_CP0_XCONTEXTCONFIG:
		if (!cpu_guest_has_contextconfig)
			return -EINVAL;
		*v = read_gc0_xcontextconfig();
		break;
#endif
	case KVM_REG_MIPS_CP0_PAGEMASK:
		*v = (long)read_gc0_pagemask();
		break;
	case KVM_REG_MIPS_CP0_PAGEGRAIN:
		*v = (long)read_gc0_pagegrain();
		break;
	case KVM_REG_MIPS_CP0_SEGCTL0:
		if (!cpu_guest_has_segments)
			return -EINVAL;
		*v = read_gc0_segctl0();
		break;
	case KVM_REG_MIPS_CP0_SEGCTL1:
		if (!cpu_guest_has_segments)
			return -EINVAL;
		*v = read_gc0_segctl1();
		break;
	case KVM_REG_MIPS_CP0_SEGCTL2:
		if (!cpu_guest_has_segments)
			return -EINVAL;
		*v = read_gc0_segctl2();
		break;
	case KVM_REG_MIPS_CP0_WIRED:
		*v = (long)read_gc0_wired();
		break;
	case KVM_REG_MIPS_CP0_HWRENA:
		*v = (long)read_gc0_hwrena();
		break;
	case KVM_REG_MIPS_CP0_BADVADDR:
		*v = (long)read_gc0_badvaddr();
		break;
	case KVM_REG_MIPS_CP0_BADINSTR:
		if (!cpu_guest_has_badinstr)
			return -EINVAL;
		*v = read_gc0_badinstr();
		break;
	case KVM_REG_MIPS_CP0_BADINSTRP:
		if (!cpu_guest_has_badinstrp)
			return -EINVAL;
		*v = read_gc0_badinstrp();
		break;
	case KVM_REG_MIPS_CP0_COUNT:
		*v = kvm_mips_read_count(vcpu);
		break;
	case KVM_REG_MIPS_CP0_ENTRYHI:
		*v = (long)read_gc0_entryhi();
		break;
	case KVM_REG_MIPS_CP0_COMPARE:
		*v = (long)read_gc0_compare();
		break;
	case KVM_REG_MIPS_CP0_STATUS:
		*v = (long)read_gc0_status();
		break;
	case KVM_REG_MIPS_CP0_INTCTL:
		*v = read_gc0_intctl();
		break;
	case KVM_REG_MIPS_CP0_CAUSE:
		*v = (long)read_gc0_cause();
		break;
	case KVM_REG_MIPS_CP0_EPC:
		*v = (long)read_gc0_epc();
		break;
	case KVM_REG_MIPS_CP0_PRID:
		*v = (long)kvm_read_c0_guest_prid(cop0);
		break;
	case KVM_REG_MIPS_CP0_EBASE:
		*v = kvm_vz_read_gc0_ebase();
		break;
	case KVM_REG_MIPS_CP0_CONFIG:
		*v = read_gc0_config();
		break;
	case KVM_REG_MIPS_CP0_CONFIG1:
		if (!cpu_guest_has_conf1)
			return -EINVAL;
		*v = read_gc0_config1();
		break;
	case KVM_REG_MIPS_CP0_CONFIG2:
		if (!cpu_guest_has_conf2)
			return -EINVAL;
		*v = read_gc0_config2();
		break;
	case KVM_REG_MIPS_CP0_CONFIG3:
		if (!cpu_guest_has_conf3)
			return -EINVAL;
		*v = read_gc0_config3();
		break;
	case KVM_REG_MIPS_CP0_CONFIG4:
		if (!cpu_guest_has_conf4)
			return -EINVAL;
		*v = read_gc0_config4();
		break;
	case KVM_REG_MIPS_CP0_CONFIG5:
		if (!cpu_guest_has_conf5)
			return -EINVAL;
		*v = read_gc0_config5();
		break;
#ifdef CONFIG_64BIT
	case KVM_REG_MIPS_CP0_XCONTEXT:
		*v = read_gc0_xcontext();
		break;
#endif
	case KVM_REG_MIPS_CP0_ERROREPC:
		*v = (long)read_gc0_errorepc();
		break;
	case KVM_REG_MIPS_CP0_KSCRATCH1 ... KVM_REG_MIPS_CP0_KSCRATCH6:
		idx = reg->id - KVM_REG_MIPS_CP0_KSCRATCH1 + 2;
		if (!cpu_guest_has_kscr(idx))
			return -EINVAL;
		switch (idx) {
		case 2:
			*v = (long)read_gc0_kscratch1();
			break;
		case 3:
			*v = (long)read_gc0_kscratch2();
			break;
		case 4:
			*v = (long)read_gc0_kscratch3();
			break;
		case 5:
			*v = (long)read_gc0_kscratch4();
			break;
		case 6:
			*v = (long)read_gc0_kscratch5();
			break;
		case 7:
			*v = (long)read_gc0_kscratch6();
			break;
		}
		break;
	case KVM_REG_MIPS_COUNT_CTL:
		*v = vcpu->arch.count_ctl;
		break;
	case KVM_REG_MIPS_COUNT_RESUME:
		*v = ktime_to_ns(vcpu->arch.count_resume);
		break;
	case KVM_REG_MIPS_COUNT_HZ:
		*v = vcpu->arch.count_hz;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static int kvm_vz_set_one_reg(struct kvm_vcpu *vcpu,
			      const struct kvm_one_reg *reg,
			      s64 v)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	unsigned int idx;
	int ret = 0;
	unsigned int cur, change;

	switch (reg->id) {
	case KVM_REG_MIPS_CP0_INDEX:
		write_gc0_index(v);
		break;
	case KVM_REG_MIPS_CP0_ENTRYLO0:
		write_gc0_entrylo0(entrylo_user_to_kvm(v));
		break;
	case KVM_REG_MIPS_CP0_ENTRYLO1:
		write_gc0_entrylo1(entrylo_user_to_kvm(v));
		break;
	case KVM_REG_MIPS_CP0_CONTEXT:
		write_gc0_context(v);
		break;
	case KVM_REG_MIPS_CP0_CONTEXTCONFIG:
		if (!cpu_guest_has_contextconfig)
			return -EINVAL;
		write_gc0_contextconfig(v);
		break;
	case KVM_REG_MIPS_CP0_USERLOCAL:
		if (!cpu_guest_has_userlocal)
			return -EINVAL;
		write_gc0_userlocal(v);
		break;
#ifdef CONFIG_64BIT
	case KVM_REG_MIPS_CP0_XCONTEXTCONFIG:
		if (!cpu_guest_has_contextconfig)
			return -EINVAL;
		write_gc0_xcontextconfig(v);
		break;
#endif
	case KVM_REG_MIPS_CP0_PAGEMASK:
		write_gc0_pagemask(v);
		break;
	case KVM_REG_MIPS_CP0_PAGEGRAIN:
		write_gc0_pagegrain(v);
		break;
	case KVM_REG_MIPS_CP0_SEGCTL0:
		if (!cpu_guest_has_segments)
			return -EINVAL;
		write_gc0_segctl0(v);
		break;
	case KVM_REG_MIPS_CP0_SEGCTL1:
		if (!cpu_guest_has_segments)
			return -EINVAL;
		write_gc0_segctl1(v);
		break;
	case KVM_REG_MIPS_CP0_SEGCTL2:
		if (!cpu_guest_has_segments)
			return -EINVAL;
		write_gc0_segctl2(v);
		break;
	case KVM_REG_MIPS_CP0_WIRED:
		change_gc0_wired(MIPSR6_WIRED_WIRED, v);
		break;
	case KVM_REG_MIPS_CP0_HWRENA:
		write_gc0_hwrena(v);
		break;
	case KVM_REG_MIPS_CP0_BADVADDR:
		write_gc0_badvaddr(v);
		break;
	case KVM_REG_MIPS_CP0_BADINSTR:
		if (!cpu_guest_has_badinstr)
			return -EINVAL;
		write_gc0_badinstr(v);
		break;
	case KVM_REG_MIPS_CP0_BADINSTRP:
		if (!cpu_guest_has_badinstrp)
			return -EINVAL;
		write_gc0_badinstrp(v);
		break;
	case KVM_REG_MIPS_CP0_COUNT:
		kvm_mips_write_count(vcpu, v);
		break;
	case KVM_REG_MIPS_CP0_ENTRYHI:
		write_gc0_entryhi(v);
		break;
	case KVM_REG_MIPS_CP0_COMPARE:
		kvm_mips_write_compare(vcpu, v, false);
		break;
	case KVM_REG_MIPS_CP0_STATUS:
		write_gc0_status(v);
		break;
	case KVM_REG_MIPS_CP0_INTCTL:
		write_gc0_intctl(v);
		break;
	case KVM_REG_MIPS_CP0_CAUSE:
		/*
		 * If the timer is stopped or started (DC bit) it must look
		 * atomic with changes to the timer interrupt pending bit (TI).
		 * A timer interrupt should not happen in between.
		 */
		if ((read_gc0_cause() ^ v) & CAUSEF_DC) {
			if (v & CAUSEF_DC) {
				/* disable timer first */
				kvm_mips_count_disable_cause(vcpu);
				change_gc0_cause((u32)~CAUSEF_DC, v);
			} else {
				/* enable timer last */
				change_gc0_cause((u32)~CAUSEF_DC, v);
				kvm_mips_count_enable_cause(vcpu);
			}
		} else {
			write_gc0_cause(v);
		}
		break;
	case KVM_REG_MIPS_CP0_EPC:
		write_gc0_epc(v);
		break;
	case KVM_REG_MIPS_CP0_PRID:
		kvm_write_c0_guest_prid(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_EBASE:
		kvm_vz_write_gc0_ebase(v);
		break;
	case KVM_REG_MIPS_CP0_CONFIG:
		cur = read_gc0_config();
		change = (cur ^ v) & kvm_vz_config_user_wrmask(vcpu);
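		/*
		 * The pattern used for all the Config registers below:
		 * (cur ^ v) has a bit set wherever the user asked for a
		 * different value, masking with the wrmask keeps only the
		 * user-writable ones, and cur ^ change then applies exactly
		 * those changes while preserving every other bit.
		 */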
		if (change) {
			v = cur ^ change;
			write_gc0_config(v);
		}
		break;
	case KVM_REG_MIPS_CP0_CONFIG1:
		if (!cpu_guest_has_conf1)
			break;
		cur = read_gc0_config1();
		change = (cur ^ v) & kvm_vz_config1_user_wrmask(vcpu);
		if (change) {
			v = cur ^ change;
			write_gc0_config1(v);
		}
		break;
	case KVM_REG_MIPS_CP0_CONFIG2:
		if (!cpu_guest_has_conf2)
			break;
		cur = read_gc0_config2();
		change = (cur ^ v) & kvm_vz_config2_user_wrmask(vcpu);
		if (change) {
			v = cur ^ change;
			write_gc0_config2(v);
		}
		break;
	case KVM_REG_MIPS_CP0_CONFIG3:
		if (!cpu_guest_has_conf3)
			break;
		cur = read_gc0_config3();
		change = (cur ^ v) & kvm_vz_config3_user_wrmask(vcpu);
		if (change) {
			v = cur ^ change;
			write_gc0_config3(v);
		}
		break;
	case KVM_REG_MIPS_CP0_CONFIG4:
		if (!cpu_guest_has_conf4)
			break;
		cur = read_gc0_config4();
		change = (cur ^ v) & kvm_vz_config4_user_wrmask(vcpu);
		if (change) {
			v = cur ^ change;
			write_gc0_config4(v);
		}
		break;
	case KVM_REG_MIPS_CP0_CONFIG5:
		if (!cpu_guest_has_conf5)
			break;
		cur = read_gc0_config5();
		change = (cur ^ v) & kvm_vz_config5_user_wrmask(vcpu);
		if (change) {
			v = cur ^ change;
			write_gc0_config5(v);
		}
		break;
#ifdef CONFIG_64BIT
	case KVM_REG_MIPS_CP0_XCONTEXT:
		write_gc0_xcontext(v);
		break;
#endif
	case KVM_REG_MIPS_CP0_ERROREPC:
		write_gc0_errorepc(v);
		break;
	case KVM_REG_MIPS_CP0_KSCRATCH1 ... KVM_REG_MIPS_CP0_KSCRATCH6:
		idx = reg->id - KVM_REG_MIPS_CP0_KSCRATCH1 + 2;
		if (!cpu_guest_has_kscr(idx))
			return -EINVAL;
		switch (idx) {
		case 2:
			write_gc0_kscratch1(v);
			break;
		case 3:
			write_gc0_kscratch2(v);
			break;
		case 4:
			write_gc0_kscratch3(v);
			break;
		case 5:
			write_gc0_kscratch4(v);
			break;
		case 6:
			write_gc0_kscratch5(v);
			break;
		case 7:
			write_gc0_kscratch6(v);
			break;
		}
		break;
	case KVM_REG_MIPS_COUNT_CTL:
		ret = kvm_mips_set_count_ctl(vcpu, v);
		break;
	case KVM_REG_MIPS_COUNT_RESUME:
		ret = kvm_mips_set_count_resume(vcpu, v);
		break;
	case KVM_REG_MIPS_COUNT_HZ:
		ret = kvm_mips_set_count_hz(vcpu, v);
		break;
	default:
		return -EINVAL;
	}
	return ret;
}

#define guestid_cache(cpu)	(cpu_data[cpu].guestid_cache)
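/*
 * GuestID allocation mirrors the ASID allocator: the low GUESTID_MASK bits
 * are the ID programmed into GuestCtl1, the bits above act as a generation
 * ("version") counter. When the ID space wraps we start a new generation and
 * flush, so any vzguestid[] from an older generation is recognisably stale.
 */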
static void kvm_vz_get_new_guestid(unsigned long cpu, struct kvm_vcpu *vcpu)
{
	unsigned long guestid = guestid_cache(cpu);

	if (!(++guestid & GUESTID_MASK)) {
		if (cpu_has_vtag_icache)
			flush_icache_all();

		if (!guestid)		/* fix version if needed */
			guestid = GUESTID_FIRST_VERSION;

		++guestid;	/* guestid 0 reserved for root */

		/* start new guestid cycle */
		kvm_vz_local_flush_roottlb_all_guests();
		kvm_vz_local_flush_guesttlb_all();
	}

	guestid_cache(cpu) = guestid;
}

/* Returns 1 if the guest TLB may be clobbered */
static int kvm_vz_check_requests(struct kvm_vcpu *vcpu, int cpu)
{
	int ret = 0;
	int i;

	if (!vcpu->requests)
		return 0;

	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
		if (cpu_has_guestid) {
			/* Drop all GuestIDs for this VCPU */
			for_each_possible_cpu(i)
				vcpu->arch.vzguestid[i] = 0;
			/* This will clobber guest TLB contents too */
			ret = 1;
		}
		/*
		 * For Root ASID Dealias (RAD) we don't do anything here, but we
		 * still need the request to ensure we recheck asid_flush_mask.
		 * We can still return 0 as only the root TLB will be affected
		 * by a root ASID flush.
		 */
	}

	return ret;
}

static void kvm_vz_vcpu_save_wired(struct kvm_vcpu *vcpu)
{
	unsigned int wired = read_gc0_wired();
	struct kvm_mips_tlb *tlbs;
	int i;

	/* Expand the wired TLB array if necessary */
	wired &= MIPSR6_WIRED_WIRED;
	if (wired > vcpu->arch.wired_tlb_limit) {
		tlbs = krealloc(vcpu->arch.wired_tlb, wired *
				sizeof(*vcpu->arch.wired_tlb), GFP_ATOMIC);
		if (WARN_ON(!tlbs)) {
			/* Save whatever we can */
			wired = vcpu->arch.wired_tlb_limit;
		} else {
			vcpu->arch.wired_tlb = tlbs;
			vcpu->arch.wired_tlb_limit = wired;
		}
	}

	if (wired)
		/* Save wired entries from the guest TLB */
		kvm_vz_save_guesttlb(vcpu->arch.wired_tlb, 0, wired);
	/* Invalidate any dropped entries since last time */
	for (i = wired; i < vcpu->arch.wired_tlb_used; ++i) {
		vcpu->arch.wired_tlb[i].tlb_hi = UNIQUE_GUEST_ENTRYHI(i);
		vcpu->arch.wired_tlb[i].tlb_lo[0] = 0;
		vcpu->arch.wired_tlb[i].tlb_lo[1] = 0;
		vcpu->arch.wired_tlb[i].tlb_mask = 0;
	}
	vcpu->arch.wired_tlb_used = wired;
}

static void kvm_vz_vcpu_load_wired(struct kvm_vcpu *vcpu)
{
	/* Load wired entries into the guest TLB */
	if (vcpu->arch.wired_tlb)
		kvm_vz_load_guesttlb(vcpu->arch.wired_tlb, 0,
				     vcpu->arch.wired_tlb_used);
}
static void kvm_vz_vcpu_load_tlb(struct kvm_vcpu *vcpu, int cpu)
{
	struct kvm *kvm = vcpu->kvm;
	struct mm_struct *gpa_mm = &kvm->arch.gpa_mm;
	bool migrated;

	/*
	 * Are we entering guest context on a different CPU to last time?
	 * If so, the VCPU's guest TLB state on this CPU may be stale.
	 */
	migrated = (vcpu->arch.last_exec_cpu != cpu);
	vcpu->arch.last_exec_cpu = cpu;

	/*
	 * A vcpu's GuestID is set in GuestCtl1.ID when the vcpu is loaded and
	 * remains set until another vcpu is loaded in. As a rule GuestRID
	 * remains zeroed when in root context unless the kernel is busy
	 * manipulating guest tlb entries.
	 */
	if (cpu_has_guestid) {
		/*
		 * Check if our GuestID is of an older version and thus invalid.
		 *
		 * We also discard the stored GuestID if we've executed on
		 * another CPU, as the guest mappings may have changed without
		 * hypervisor knowledge.
		 */
		if (migrated ||
		    (vcpu->arch.vzguestid[cpu] ^ guestid_cache(cpu)) &
					GUESTID_VERSION_MASK) {
			kvm_vz_get_new_guestid(cpu, vcpu);
			vcpu->arch.vzguestid[cpu] = guestid_cache(cpu);
			trace_kvm_guestid_change(vcpu,
						 vcpu->arch.vzguestid[cpu]);
		}

		/* Restore GuestID */
		change_c0_guestctl1(GUESTID_MASK, vcpu->arch.vzguestid[cpu]);
	} else {
		/*
		 * The Guest TLB only stores a single guest's TLB state, so
		 * flush it if another VCPU has executed on this CPU.
		 *
		 * We also flush if we've executed on another CPU, as the guest
		 * mappings may have changed without hypervisor knowledge.
		 */
		if (migrated || last_exec_vcpu[cpu] != vcpu)
			kvm_vz_local_flush_guesttlb_all();
		last_exec_vcpu[cpu] = vcpu;

		/*
		 * Root ASID dealiases guest GPA mappings in the root TLB.
		 * Allocate new root ASID if needed.
		 */
		if (cpumask_test_and_clear_cpu(cpu, &kvm->arch.asid_flush_mask)
		    || (cpu_context(cpu, gpa_mm) ^ asid_cache(cpu)) &
					asid_version_mask(cpu))
			get_new_mmu_context(gpa_mm, cpu);
	}
}

static int kvm_vz_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	bool migrated, all;

	/*
	 * Have we migrated to a different CPU?
	 * If so, any old guest TLB state may be stale.
	 */
	migrated = (vcpu->arch.last_sched_cpu != cpu);

	/*
	 * Was this the last VCPU to run on this CPU?
	 * If not, any old guest state from this VCPU will have been clobbered.
	 */
	all = migrated || (last_vcpu[cpu] != vcpu);
	last_vcpu[cpu] = vcpu;

	/*
	 * Restore CP0_Wired unconditionally as we clear it after use, and
	 * restore wired guest TLB entries (while in guest context).
	 */
	kvm_restore_gc0_wired(cop0);
	if (current->flags & PF_VCPU) {
		tlbw_use_hazard();
		kvm_vz_vcpu_load_tlb(vcpu, cpu);
		kvm_vz_vcpu_load_wired(vcpu);
	}

	/*
	 * Restore timer state regardless, as e.g. Cause.TI can change over
	 * time if left unmaintained.
	 */
	kvm_vz_restore_timer(vcpu);

	/* Don't bother restoring registers multiple times unless necessary */
	if (!all)
		return 0;

	/*
	 * Restore config registers first, as some implementations restrict
	 * writes to other registers when the corresponding feature bits aren't
	 * set. For example Status.CU1 cannot be set unless Config1.FP is set.
	 */
	kvm_restore_gc0_config(cop0);
	if (cpu_guest_has_conf1)
		kvm_restore_gc0_config1(cop0);
	if (cpu_guest_has_conf2)
		kvm_restore_gc0_config2(cop0);
	if (cpu_guest_has_conf3)
		kvm_restore_gc0_config3(cop0);
	if (cpu_guest_has_conf4)
		kvm_restore_gc0_config4(cop0);
	if (cpu_guest_has_conf5)
		kvm_restore_gc0_config5(cop0);
	if (cpu_guest_has_conf6)
		kvm_restore_gc0_config6(cop0);
	if (cpu_guest_has_conf7)
		kvm_restore_gc0_config7(cop0);

	kvm_restore_gc0_index(cop0);
	kvm_restore_gc0_entrylo0(cop0);
	kvm_restore_gc0_entrylo1(cop0);
	kvm_restore_gc0_context(cop0);
	if (cpu_guest_has_contextconfig)
		kvm_restore_gc0_contextconfig(cop0);
#ifdef CONFIG_64BIT
	kvm_restore_gc0_xcontext(cop0);
	if (cpu_guest_has_contextconfig)
		kvm_restore_gc0_xcontextconfig(cop0);
#endif
	kvm_restore_gc0_pagemask(cop0);
	kvm_restore_gc0_pagegrain(cop0);
	kvm_restore_gc0_hwrena(cop0);
	kvm_restore_gc0_badvaddr(cop0);
	kvm_restore_gc0_entryhi(cop0);
	kvm_restore_gc0_status(cop0);
	kvm_restore_gc0_intctl(cop0);
	kvm_restore_gc0_epc(cop0);
	kvm_vz_write_gc0_ebase(kvm_read_sw_gc0_ebase(cop0));
	if (cpu_guest_has_userlocal)
		kvm_restore_gc0_userlocal(cop0);

	kvm_restore_gc0_errorepc(cop0);

	/* restore KScratch registers if enabled in guest */
	if (cpu_guest_has_conf4) {
		if (cpu_guest_has_kscr(2))
			kvm_restore_gc0_kscratch1(cop0);
		if (cpu_guest_has_kscr(3))
			kvm_restore_gc0_kscratch2(cop0);
		if (cpu_guest_has_kscr(4))
			kvm_restore_gc0_kscratch3(cop0);
		if (cpu_guest_has_kscr(5))
			kvm_restore_gc0_kscratch4(cop0);
		if (cpu_guest_has_kscr(6))
			kvm_restore_gc0_kscratch5(cop0);
		if (cpu_guest_has_kscr(7))
			kvm_restore_gc0_kscratch6(cop0);
	}

	if (cpu_guest_has_badinstr)
		kvm_restore_gc0_badinstr(cop0);
	if (cpu_guest_has_badinstrp)
		kvm_restore_gc0_badinstrp(cop0);

	if (cpu_guest_has_segments) {
		kvm_restore_gc0_segctl0(cop0);
		kvm_restore_gc0_segctl1(cop0);
		kvm_restore_gc0_segctl2(cop0);
	}

	/* restore Root.GuestCtl2 from unused Guest guestctl2 register */
	if (cpu_has_guestctl2)
		write_c0_guestctl2(
			cop0->reg[MIPS_CP0_GUESTCTL2][MIPS_CP0_GUESTCTL2_SEL]);

	return 0;
}
static int kvm_vz_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	bool migrated, all;

	/*
	 * Have we migrated to a different CPU?
	 * If so, any old guest TLB state may be stale.
	 */
	migrated = (vcpu->arch.last_sched_cpu != cpu);

	/*
	 * Was this the last VCPU to run on this CPU?
	 * If not, any old guest state from this VCPU will have been clobbered.
	 */
	all = migrated || (last_vcpu[cpu] != vcpu);
	last_vcpu[cpu] = vcpu;

	/*
	 * Restore CP0_Wired unconditionally as we clear it after use, and
	 * restore wired guest TLB entries (while in guest context).
	 */
	kvm_restore_gc0_wired(cop0);
	if (current->flags & PF_VCPU) {
		tlbw_use_hazard();
		kvm_vz_vcpu_load_tlb(vcpu, cpu);
		kvm_vz_vcpu_load_wired(vcpu);
	}

	/*
	 * Restore timer state regardless, as e.g. Cause.TI can change over
	 * time if left unmaintained.
	 */
	kvm_vz_restore_timer(vcpu);

	/* Don't bother restoring registers multiple times unless necessary */
	if (!all)
		return 0;

	/*
	 * Restore config registers first, as some implementations restrict
	 * writes to other registers when the corresponding feature bits
	 * aren't set. For example Status.CU1 cannot be set unless Config1.FP
	 * is set.
	 */
	kvm_restore_gc0_config(cop0);
	if (cpu_guest_has_conf1)
		kvm_restore_gc0_config1(cop0);
	if (cpu_guest_has_conf2)
		kvm_restore_gc0_config2(cop0);
	if (cpu_guest_has_conf3)
		kvm_restore_gc0_config3(cop0);
	if (cpu_guest_has_conf4)
		kvm_restore_gc0_config4(cop0);
	if (cpu_guest_has_conf5)
		kvm_restore_gc0_config5(cop0);
	if (cpu_guest_has_conf6)
		kvm_restore_gc0_config6(cop0);
	if (cpu_guest_has_conf7)
		kvm_restore_gc0_config7(cop0);

	kvm_restore_gc0_index(cop0);
	kvm_restore_gc0_entrylo0(cop0);
	kvm_restore_gc0_entrylo1(cop0);
	kvm_restore_gc0_context(cop0);
	if (cpu_guest_has_contextconfig)
		kvm_restore_gc0_contextconfig(cop0);
#ifdef CONFIG_64BIT
	kvm_restore_gc0_xcontext(cop0);
	if (cpu_guest_has_contextconfig)
		kvm_restore_gc0_xcontextconfig(cop0);
#endif
	kvm_restore_gc0_pagemask(cop0);
	kvm_restore_gc0_pagegrain(cop0);
	kvm_restore_gc0_hwrena(cop0);
	kvm_restore_gc0_badvaddr(cop0);
	kvm_restore_gc0_entryhi(cop0);
	kvm_restore_gc0_status(cop0);
	kvm_restore_gc0_intctl(cop0);
	kvm_restore_gc0_epc(cop0);
	kvm_vz_write_gc0_ebase(kvm_read_sw_gc0_ebase(cop0));
	if (cpu_guest_has_userlocal)
		kvm_restore_gc0_userlocal(cop0);

	kvm_restore_gc0_errorepc(cop0);

	/* restore KScratch registers if enabled in guest */
	if (cpu_guest_has_conf4) {
		if (cpu_guest_has_kscr(2))
			kvm_restore_gc0_kscratch1(cop0);
		if (cpu_guest_has_kscr(3))
			kvm_restore_gc0_kscratch2(cop0);
		if (cpu_guest_has_kscr(4))
			kvm_restore_gc0_kscratch3(cop0);
		if (cpu_guest_has_kscr(5))
			kvm_restore_gc0_kscratch4(cop0);
		if (cpu_guest_has_kscr(6))
			kvm_restore_gc0_kscratch5(cop0);
		if (cpu_guest_has_kscr(7))
			kvm_restore_gc0_kscratch6(cop0);
	}

	if (cpu_guest_has_badinstr)
		kvm_restore_gc0_badinstr(cop0);
	if (cpu_guest_has_badinstrp)
		kvm_restore_gc0_badinstrp(cop0);

	if (cpu_guest_has_segments) {
		kvm_restore_gc0_segctl0(cop0);
		kvm_restore_gc0_segctl1(cop0);
		kvm_restore_gc0_segctl2(cop0);
	}

	/* restore Root.GuestCtl2 from unused Guest guestctl2 register */
	if (cpu_has_guestctl2)
		write_c0_guestctl2(
			cop0->reg[MIPS_CP0_GUESTCTL2][MIPS_CP0_GUESTCTL2_SEL]);

	return 0;
}

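/*
 * Illustrative note (not from the original source): the restore order above
 * matters because some guest CP0 fields only accept writes once the feature
 * bit advertising them is set. A hedged sketch of the same dependency-ordered
 * pattern, with hypothetical ctx/restore_* names:
 *
 *	static void restore_ctx(struct ctx *c)
 *	{
 *		restore_feature_bits(c);	// e.g. Config1.FP first...
 *		restore_dependent_state(c);	// ...so Status.CU1 sticks
 *	}
 *
 * Restoring in the opposite order could silently drop bits such as
 * Status.CU1 on implementations that hold them at 0 while Config1.FP is
 * clear.
 */
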
static int kvm_vz_vcpu_put(struct kvm_vcpu *vcpu, int cpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;

	if (current->flags & PF_VCPU)
		kvm_vz_vcpu_save_wired(vcpu);

	kvm_lose_fpu(vcpu);

	kvm_save_gc0_index(cop0);
	kvm_save_gc0_entrylo0(cop0);
	kvm_save_gc0_entrylo1(cop0);
	kvm_save_gc0_context(cop0);
	if (cpu_guest_has_contextconfig)
		kvm_save_gc0_contextconfig(cop0);
#ifdef CONFIG_64BIT
	kvm_save_gc0_xcontext(cop0);
	if (cpu_guest_has_contextconfig)
		kvm_save_gc0_xcontextconfig(cop0);
#endif
	kvm_save_gc0_pagemask(cop0);
	kvm_save_gc0_pagegrain(cop0);
	kvm_save_gc0_wired(cop0);
	/* allow wired TLB entries to be overwritten */
	clear_gc0_wired(MIPSR6_WIRED_WIRED);
	kvm_save_gc0_hwrena(cop0);
	kvm_save_gc0_badvaddr(cop0);
	kvm_save_gc0_entryhi(cop0);
	kvm_save_gc0_status(cop0);
	kvm_save_gc0_intctl(cop0);
	kvm_save_gc0_epc(cop0);
	kvm_write_sw_gc0_ebase(cop0, kvm_vz_read_gc0_ebase());
	if (cpu_guest_has_userlocal)
		kvm_save_gc0_userlocal(cop0);

	/* only save implemented config registers */
	kvm_save_gc0_config(cop0);
	if (cpu_guest_has_conf1)
		kvm_save_gc0_config1(cop0);
	if (cpu_guest_has_conf2)
		kvm_save_gc0_config2(cop0);
	if (cpu_guest_has_conf3)
		kvm_save_gc0_config3(cop0);
	if (cpu_guest_has_conf4)
		kvm_save_gc0_config4(cop0);
	if (cpu_guest_has_conf5)
		kvm_save_gc0_config5(cop0);
	if (cpu_guest_has_conf6)
		kvm_save_gc0_config6(cop0);
	if (cpu_guest_has_conf7)
		kvm_save_gc0_config7(cop0);

	kvm_save_gc0_errorepc(cop0);

	/* save KScratch registers if enabled in guest */
	if (cpu_guest_has_conf4) {
		if (cpu_guest_has_kscr(2))
			kvm_save_gc0_kscratch1(cop0);
		if (cpu_guest_has_kscr(3))
			kvm_save_gc0_kscratch2(cop0);
		if (cpu_guest_has_kscr(4))
			kvm_save_gc0_kscratch3(cop0);
		if (cpu_guest_has_kscr(5))
			kvm_save_gc0_kscratch4(cop0);
		if (cpu_guest_has_kscr(6))
			kvm_save_gc0_kscratch5(cop0);
		if (cpu_guest_has_kscr(7))
			kvm_save_gc0_kscratch6(cop0);
	}

	if (cpu_guest_has_badinstr)
		kvm_save_gc0_badinstr(cop0);
	if (cpu_guest_has_badinstrp)
		kvm_save_gc0_badinstrp(cop0);

	if (cpu_guest_has_segments) {
		kvm_save_gc0_segctl0(cop0);
		kvm_save_gc0_segctl1(cop0);
		kvm_save_gc0_segctl2(cop0);
	}

	kvm_vz_save_timer(vcpu);

	/* save Root.GuestCtl2 in unused Guest guestctl2 register */
	if (cpu_has_guestctl2)
		cop0->reg[MIPS_CP0_GUESTCTL2][MIPS_CP0_GUESTCTL2_SEL] =
			read_c0_guestctl2();

	return 0;
}

/**
 * kvm_vz_resize_guest_vtlb() - Attempt to resize guest VTLB.
 * @size:	Number of guest VTLB entries (0 < @size <= root VTLB entries).
 *
 * Attempt to resize the guest VTLB by writing guest Config registers. This is
 * necessary for cores with a shared root/guest TLB to avoid overlap with
 * wired entries in the root VTLB.
 *
 * Returns:	The resulting guest VTLB size.
 */
static unsigned int kvm_vz_resize_guest_vtlb(unsigned int size)
{
	unsigned int config4 = 0, ret = 0, limit;

	/* Write MMUSize - 1 into guest Config registers */
	if (cpu_guest_has_conf1)
		change_gc0_config1(MIPS_CONF1_TLBS,
				   (size - 1) << MIPS_CONF1_TLBS_SHIFT);
	if (cpu_guest_has_conf4) {
		config4 = read_gc0_config4();
		if (cpu_has_mips_r6 || (config4 & MIPS_CONF4_MMUEXTDEF) ==
		    MIPS_CONF4_MMUEXTDEF_VTLBSIZEEXT) {
			config4 &= ~MIPS_CONF4_VTLBSIZEEXT;
			config4 |= ((size - 1) >> MIPS_CONF1_TLBS_SIZE) <<
				MIPS_CONF4_VTLBSIZEEXT_SHIFT;
		} else if ((config4 & MIPS_CONF4_MMUEXTDEF) ==
			   MIPS_CONF4_MMUEXTDEF_MMUSIZEEXT) {
			config4 &= ~MIPS_CONF4_MMUSIZEEXT;
			config4 |= ((size - 1) >> MIPS_CONF1_TLBS_SIZE) <<
				MIPS_CONF4_MMUSIZEEXT_SHIFT;
		}
		write_gc0_config4(config4);
	}

	/*
	 * Set Guest.Wired.Limit = 0 (no limit up to Guest.MMUSize-1), unless
	 * it would exceed Root.Wired.Limit (clearing Guest.Wired.Wired so the
	 * write is not dropped).
	 */
	if (cpu_has_mips_r6) {
		limit = (read_c0_wired() & MIPSR6_WIRED_LIMIT) >>
						MIPSR6_WIRED_LIMIT_SHIFT;
		if (size - 1 <= limit)
			limit = 0;
		write_gc0_wired(limit << MIPSR6_WIRED_LIMIT_SHIFT);
	}

	/* Read back MMUSize - 1 */
	back_to_back_c0_hazard();
	if (cpu_guest_has_conf1)
		ret = (read_gc0_config1() & MIPS_CONF1_TLBS) >>
						MIPS_CONF1_TLBS_SHIFT;
	if (config4) {
		if (cpu_has_mips_r6 || (config4 & MIPS_CONF4_MMUEXTDEF) ==
		    MIPS_CONF4_MMUEXTDEF_VTLBSIZEEXT)
			ret |= ((config4 & MIPS_CONF4_VTLBSIZEEXT) >>
				MIPS_CONF4_VTLBSIZEEXT_SHIFT) <<
				MIPS_CONF1_TLBS_SIZE;
		else if ((config4 & MIPS_CONF4_MMUEXTDEF) ==
			 MIPS_CONF4_MMUEXTDEF_MMUSIZEEXT)
			ret |= ((config4 & MIPS_CONF4_MMUSIZEEXT) >>
				MIPS_CONF4_MMUSIZEEXT_SHIFT) <<
				MIPS_CONF1_TLBS_SIZE;
	}
	return ret + 1;
}

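/*
 * Worked example (not from the original source): Config1.MMUSize is a 6-bit
 * field holding "entries - 1", so on its own it tops out at 64 entries;
 * larger VTLBs spill the upper bits into Config4 (VTLBSizeExt or MMUSizeExt,
 * depending on MMUExtDef). MIPS_CONF1_TLBS_SIZE is that 6-bit field width.
 * For size = 96:
 *
 *	size - 1		= 95 = 0b1011111
 *	Config1.MMUSize		= 95 & 0x3f = 31	(low 6 bits)
 *	Config4.VTLBSizeExt	= 95 >> 6   = 1		(remaining high bits)
 *
 * The read-back path reverses the split: 31 | (1 << 6) = 95, and the
 * "ret + 1" in kvm_vz_resize_guest_vtlb() recovers the 96-entry size.
 */
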
static int kvm_vz_hardware_enable(void)
{
	unsigned int mmu_size, guest_mmu_size, ftlb_size;

	/*
	 * ImgTec cores tend to use a shared root/guest TLB. To avoid overlap
	 * of root wired and guest entries, the guest TLB may need resizing.
	 */
	mmu_size = current_cpu_data.tlbsizevtlb;
	ftlb_size = current_cpu_data.tlbsize - mmu_size;

	/* Try switching to maximum guest VTLB size for flush */
	guest_mmu_size = kvm_vz_resize_guest_vtlb(mmu_size);
	current_cpu_data.guest.tlbsize = guest_mmu_size + ftlb_size;
	kvm_vz_local_flush_guesttlb_all();

	/*
	 * Reduce to make space for root wired entries and at least 2 root
	 * non-wired entries. This does assume that long-term wired entries
	 * won't be added later.
	 */
	guest_mmu_size = mmu_size - num_wired_entries() - 2;
	guest_mmu_size = kvm_vz_resize_guest_vtlb(guest_mmu_size);
	current_cpu_data.guest.tlbsize = guest_mmu_size + ftlb_size;

	/*
	 * Write the VTLB size, but if another CPU has already written, check
	 * it matches or we won't provide a consistent view to the guest. If
	 * this ever happens it suggests an asymmetric number of wired
	 * entries.
	 */
	if (cmpxchg(&kvm_vz_guest_vtlb_size, 0, guest_mmu_size) &&
	    WARN(guest_mmu_size != kvm_vz_guest_vtlb_size,
		 "Available guest VTLB size mismatch"))
		return -EINVAL;

	/*
	 * Enable virtualization features granting guest direct control of
	 * certain features:
	 *	CP0=1:		Guest coprocessor 0 context.
	 *	AT=Guest:	Guest MMU.
	 *	CG=1:		Hit (virtual address) CACHE operations (optional).
	 *	CF=1:		Guest Config registers.
	 *	CGI=1:		Indexed flush CACHE operations (optional).
	 */
	write_c0_guestctl0(MIPS_GCTL0_CP0 |
			   (MIPS_GCTL0_AT_GUEST << MIPS_GCTL0_AT_SHIFT) |
			   MIPS_GCTL0_CG | MIPS_GCTL0_CF);
	if (cpu_has_guestctl0ext)
		set_c0_guestctl0ext(MIPS_GCTL0EXT_CGI);

	if (cpu_has_guestid) {
		write_c0_guestctl1(0);
		kvm_vz_local_flush_roottlb_all_guests();

		GUESTID_MASK = current_cpu_data.guestid_mask;
		GUESTID_FIRST_VERSION = GUESTID_MASK + 1;
		GUESTID_VERSION_MASK = ~GUESTID_MASK;

		current_cpu_data.guestid_cache = GUESTID_FIRST_VERSION;
	}

	/* clear any pending injected virtual guest interrupts */
	if (cpu_has_guestctl2)
		clear_c0_guestctl2(0x3f << 10);

	return 0;
}

static void kvm_vz_hardware_disable(void)
{
	kvm_vz_local_flush_guesttlb_all();

	if (cpu_has_guestid) {
		write_c0_guestctl1(0);
		kvm_vz_local_flush_roottlb_all_guests();
	}
}

static int kvm_vz_check_extension(struct kvm *kvm, long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_MIPS_VZ:
		/* we wouldn't be here unless cpu_has_vz */
		r = 1;
		break;
#ifdef CONFIG_64BIT
	case KVM_CAP_MIPS_64BIT:
		/* We support 64-bit registers/operations and addresses */
		r = 2;
		break;
#endif
	default:
		r = 0;
		break;
	}

	return r;
}

static int kvm_vz_vcpu_init(struct kvm_vcpu *vcpu)
{
	int i;

	for_each_possible_cpu(i)
		vcpu->arch.vzguestid[i] = 0;

	return 0;
}

static void kvm_vz_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	int cpu;

	/*
	 * If the VCPU is freed and reused as another VCPU, we don't want the
	 * matching pointer wrongly hanging around in last_vcpu[] or
	 * last_exec_vcpu[].
	 */
	for_each_possible_cpu(cpu) {
		if (last_vcpu[cpu] == vcpu)
			last_vcpu[cpu] = NULL;
		if (last_exec_vcpu[cpu] == vcpu)
			last_exec_vcpu[cpu] = NULL;
	}
}

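/*
 * Illustrative sketch (not from the original source): the cmpxchg() in
 * kvm_vz_hardware_enable() implements a "first CPU publishes, later CPUs
 * verify" pattern. cmpxchg() returns the old value, so a zero return means
 * this caller performed the initial write, while a non-zero return means
 * another CPU got there first and the local value must match it. A hedged
 * standalone analogue with a hypothetical global:
 *
 *	static unsigned int published_size;
 *
 *	static int publish_or_verify(unsigned int size)
 *	{
 *		// the 0 -> size transition succeeds only for the first caller
 *		if (cmpxchg(&published_size, 0, size) &&
 *		    WARN(size != published_size, "size mismatch"))
 *			return -EINVAL;
 *		return 0;
 *	}
 *
 * The WARN() should only fire on genuinely asymmetric systems, e.g.
 * differing root wired entry counts per CPU, which would otherwise give the
 * guest an inconsistent Config1.MMUSize view depending on where it runs.
 */
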
static int kvm_vz_vcpu_setup(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	unsigned long count_hz = 100*1000*1000; /* default to 100 MHz */

	/*
	 * Start off the timer at the same frequency as the host timer, but
	 * the soft timer doesn't handle frequencies greater than 1GHz yet.
	 */
	if (mips_hpt_frequency && mips_hpt_frequency <= NSEC_PER_SEC)
		count_hz = mips_hpt_frequency;
	kvm_mips_init_count(vcpu, count_hz);

	/*
	 * Initialize guest register state to valid architectural reset state.
	 */

	/* PageGrain */
	if (cpu_has_mips_r6)
		kvm_write_sw_gc0_pagegrain(cop0, PG_RIE | PG_XIE | PG_IEC);
	/* Wired */
	if (cpu_has_mips_r6)
		kvm_write_sw_gc0_wired(cop0,
				       read_gc0_wired() & MIPSR6_WIRED_LIMIT);
	/* Status */
	kvm_write_sw_gc0_status(cop0, ST0_BEV | ST0_ERL);
	if (cpu_has_mips_r6)
		kvm_change_sw_gc0_status(cop0, ST0_FR, read_gc0_status());
	/* IntCtl */
	kvm_write_sw_gc0_intctl(cop0, read_gc0_intctl() &
				(INTCTLF_IPFDC | INTCTLF_IPPCI | INTCTLF_IPTI));
	/* PRId */
	kvm_write_sw_gc0_prid(cop0, boot_cpu_data.processor_id);
	/* EBase */
	kvm_write_sw_gc0_ebase(cop0, (s32)0x80000000 | vcpu->vcpu_id);

	/* Config */
	kvm_save_gc0_config(cop0);
	/* architecturally writable (e.g. from guest) */
	kvm_change_sw_gc0_config(cop0, CONF_CM_CMASK,
				 _page_cachable_default >> _CACHE_SHIFT);
	/* architecturally read only, but maybe writable from root */
	kvm_change_sw_gc0_config(cop0, MIPS_CONF_MT, read_c0_config());
	if (cpu_guest_has_conf1) {
		kvm_set_sw_gc0_config(cop0, MIPS_CONF_M);
		/* Config1 */
		kvm_save_gc0_config1(cop0);
		/* architecturally read only, but maybe writable from root */
		kvm_clear_sw_gc0_config1(cop0, MIPS_CONF1_C2 |
					       MIPS_CONF1_MD |
					       MIPS_CONF1_PC |
					       MIPS_CONF1_WR |
					       MIPS_CONF1_CA |
					       MIPS_CONF1_FP);
	}
	if (cpu_guest_has_conf2) {
		kvm_set_sw_gc0_config1(cop0, MIPS_CONF_M);
		/* Config2 */
		kvm_save_gc0_config2(cop0);
	}
	if (cpu_guest_has_conf3) {
		kvm_set_sw_gc0_config2(cop0, MIPS_CONF_M);
		/* Config3 */
		kvm_save_gc0_config3(cop0);
		/* architecturally writable (e.g. from guest) */
		kvm_clear_sw_gc0_config3(cop0, MIPS_CONF3_ISA_OE);
		/* architecturally read only, but maybe writable from root */
		kvm_clear_sw_gc0_config3(cop0, MIPS_CONF3_MSA |
					       MIPS_CONF3_BPG |
					       MIPS_CONF3_ULRI |
					       MIPS_CONF3_DSP |
					       MIPS_CONF3_CTXTC |
					       MIPS_CONF3_ITL |
					       MIPS_CONF3_LPA |
					       MIPS_CONF3_VEIC |
					       MIPS_CONF3_VINT |
					       MIPS_CONF3_SP |
					       MIPS_CONF3_CDMM |
					       MIPS_CONF3_MT |
					       MIPS_CONF3_SM |
					       MIPS_CONF3_TL);
	}
	if (cpu_guest_has_conf4) {
		kvm_set_sw_gc0_config3(cop0, MIPS_CONF_M);
		/* Config4 */
		kvm_save_gc0_config4(cop0);
	}
	if (cpu_guest_has_conf5) {
		kvm_set_sw_gc0_config4(cop0, MIPS_CONF_M);
		/* Config5 */
		kvm_save_gc0_config5(cop0);
		/* architecturally writable (e.g. from guest) */
		kvm_clear_sw_gc0_config5(cop0, MIPS_CONF5_K |
					       MIPS_CONF5_CV |
					       MIPS_CONF5_MSAEN |
					       MIPS_CONF5_UFE |
					       MIPS_CONF5_FRE |
					       MIPS_CONF5_SBRI |
					       MIPS_CONF5_UFR);
		/* architecturally read only, but maybe writable from root */
		kvm_clear_sw_gc0_config5(cop0, MIPS_CONF5_MRP);
	}

	if (cpu_guest_has_contextconfig) {
		/* ContextConfig */
		kvm_write_sw_gc0_contextconfig(cop0, 0x007ffff0);
#ifdef CONFIG_64BIT
		/* XContextConfig */
		/* bits SEGBITS-13+3:4 set */
		kvm_write_sw_gc0_xcontextconfig(cop0,
					((1ull << (cpu_vmbits - 13)) - 1) << 4);
#endif
	}

	/* Implementation dependent, use the legacy layout */
	if (cpu_guest_has_segments) {
		/* SegCtl0, SegCtl1, SegCtl2 */
		kvm_write_sw_gc0_segctl0(cop0, 0x00200010);
		kvm_write_sw_gc0_segctl1(cop0, 0x00000002 |
				(_page_cachable_default >> _CACHE_SHIFT) <<
						(16 + MIPS_SEGCFG_C_SHIFT));
		kvm_write_sw_gc0_segctl2(cop0, 0x00380438);
	}

	/* start with no pending virtual guest interrupts */
	if (cpu_has_guestctl2)
		cop0->reg[MIPS_CP0_GUESTCTL2][MIPS_CP0_GUESTCTL2_SEL] = 0;

	/* Put PC at reset vector */
	vcpu->arch.pc = CKSEG1ADDR(0x1fc00000);

	return 0;
}

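/*
 * Worked example (not from the original source): the (s32) cast in the EBase
 * write above is what turns the constant into a proper kernel-segment address
 * on 64-bit. A plain 32-bit 0x80000000 would zero-extend to an address
 * outside the kernel segments, whereas casting through s32 sign-extends it
 * into (C)KSEG0:
 *
 *	(u64)(s32)0x80000000 = 0xffffffff80000000	(KSEG0 base, correct)
 *	(u64)(u32)0x80000000 = 0x0000000080000000	(wrong on 64-bit)
 *
 * OR-ing in vcpu_id then gives each VCPU a distinct software-visible EBase,
 * echoing how the read-only EBase.CPUNum field identifies a CPU on real
 * hardware.
 */
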
static void kvm_vz_flush_shadow_all(struct kvm *kvm)
{
	if (cpu_has_guestid) {
		/* Flush GuestID for each VCPU individually */
		kvm_flush_remote_tlbs(kvm);
	} else {
		/*
		 * For each CPU there is a single GPA ASID used by all VCPUs
		 * in the VM, so it doesn't make sense for the VCPUs to handle
		 * invalidation of these ASIDs individually.
		 *
		 * Instead mark all CPUs as needing ASID invalidation in
		 * asid_flush_mask, and just use kvm_flush_remote_tlbs(kvm) to
		 * kick any running VCPUs so they check asid_flush_mask.
		 */
		cpumask_setall(&kvm->arch.asid_flush_mask);
		kvm_flush_remote_tlbs(kvm);
	}
}

static void kvm_vz_flush_shadow_memslot(struct kvm *kvm,
					const struct kvm_memory_slot *slot)
{
	kvm_vz_flush_shadow_all(kvm);
}

static void kvm_vz_vcpu_reenter(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	int cpu = smp_processor_id();
	int preserve_guest_tlb;

	preserve_guest_tlb = kvm_vz_check_requests(vcpu, cpu);

	if (preserve_guest_tlb)
		kvm_vz_vcpu_save_wired(vcpu);

	kvm_vz_vcpu_load_tlb(vcpu, cpu);

	if (preserve_guest_tlb)
		kvm_vz_vcpu_load_wired(vcpu);
}

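/*
 * Illustrative note (not from the original source): kvm_vz_vcpu_reenter()
 * brackets the TLB reload with a save/restore of the wired entries only when
 * kvm_vz_check_requests() reports that a pending request will invalidate the
 * guest TLB. A hedged sketch of the same preserve-around-invalidate shape,
 * with hypothetical helper names:
 *
 *	bool dirty = check_and_clear_flush_request();
 *
 *	if (dirty)
 *		save_precious_entries();	// survive in software copy
 *	reload_tlb_state();			// may wipe the hardware TLB
 *	if (dirty)
 *		restore_precious_entries();
 *
 * Skipping the save when no request is pending keeps the common re-entry
 * path cheap.
 */
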
static int kvm_vz_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	int cpu = smp_processor_id();
	int r;

	/* Check if we have any exceptions/interrupts pending */
	kvm_mips_deliver_interrupts(vcpu, read_gc0_cause());

	kvm_vz_check_requests(vcpu, cpu);
	kvm_vz_vcpu_load_tlb(vcpu, cpu);
	kvm_vz_vcpu_load_wired(vcpu);

	r = vcpu->arch.vcpu_run(run, vcpu);

	kvm_vz_vcpu_save_wired(vcpu);

	return r;
}

static struct kvm_mips_callbacks kvm_vz_callbacks = {
	.handle_cop_unusable = kvm_trap_vz_handle_cop_unusable,
	.handle_tlb_mod = kvm_trap_vz_handle_tlb_st_miss,
	.handle_tlb_ld_miss = kvm_trap_vz_handle_tlb_ld_miss,
	.handle_tlb_st_miss = kvm_trap_vz_handle_tlb_st_miss,
	.handle_addr_err_st = kvm_trap_vz_no_handler,
	.handle_addr_err_ld = kvm_trap_vz_no_handler,
	.handle_syscall = kvm_trap_vz_no_handler,
	.handle_res_inst = kvm_trap_vz_no_handler,
	.handle_break = kvm_trap_vz_no_handler,
	.handle_msa_disabled = kvm_trap_vz_handle_msa_disabled,
	.handle_guest_exit = kvm_trap_vz_handle_guest_exit,

	.hardware_enable = kvm_vz_hardware_enable,
	.hardware_disable = kvm_vz_hardware_disable,
	.check_extension = kvm_vz_check_extension,
	.vcpu_init = kvm_vz_vcpu_init,
	.vcpu_uninit = kvm_vz_vcpu_uninit,
	.vcpu_setup = kvm_vz_vcpu_setup,
	.flush_shadow_all = kvm_vz_flush_shadow_all,
	.flush_shadow_memslot = kvm_vz_flush_shadow_memslot,
	.gva_to_gpa = kvm_vz_gva_to_gpa_cb,
	.queue_timer_int = kvm_vz_queue_timer_int_cb,
	.dequeue_timer_int = kvm_vz_dequeue_timer_int_cb,
	.queue_io_int = kvm_vz_queue_io_int_cb,
	.dequeue_io_int = kvm_vz_dequeue_io_int_cb,
	.irq_deliver = kvm_vz_irq_deliver_cb,
	.irq_clear = kvm_vz_irq_clear_cb,
	.num_regs = kvm_vz_num_regs,
	.copy_reg_indices = kvm_vz_copy_reg_indices,
	.get_one_reg = kvm_vz_get_one_reg,
	.set_one_reg = kvm_vz_set_one_reg,
	.vcpu_load = kvm_vz_vcpu_load,
	.vcpu_put = kvm_vz_vcpu_put,
	.vcpu_run = kvm_vz_vcpu_run,
	.vcpu_reenter = kvm_vz_vcpu_reenter,
};

int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks)
{
	if (!cpu_has_vz)
		return -ENODEV;

	/*
	 * VZ requires at least 2 KScratch registers, so it should have been
	 * possible to allocate pgd_reg.
	 */
	if (WARN(pgd_reg == -1,
		 "pgd_reg not allocated even though cpu_has_vz\n"))
		return -ENODEV;

	pr_info("Starting KVM with MIPS VZ extensions\n");

	*install_callbacks = &kvm_vz_callbacks;
	return 0;
}