/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS: Support for hardware virtualization extensions
 *
 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
 * Authors: Yann Le Du <ledu@kymasys.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/preempt.h>
#include <linux/vmalloc.h>
#include <asm/cacheflush.h>
#include <asm/cacheops.h>
#include <asm/cmpxchg.h>
#include <asm/fpu.h>
#include <asm/hazards.h>
#include <asm/inst.h>
#include <asm/mmu_context.h>
#include <asm/r4kcache.h>
#include <asm/time.h>
#include <asm/tlb.h>
#include <asm/tlbex.h>

#include <linux/kvm_host.h>

#include "interrupt.h"

#include "trace.h"

/* Pointers to last VCPU loaded on each physical CPU */
static struct kvm_vcpu *last_vcpu[NR_CPUS];
/* Pointers to last VCPU executed on each physical CPU */
static struct kvm_vcpu *last_exec_vcpu[NR_CPUS];

/*
 * Number of guest VTLB entries to use, so we can catch inconsistency between
 * CPUs.
 */
static unsigned int kvm_vz_guest_vtlb_size;

static inline long kvm_vz_read_gc0_ebase(void)
{
	if (sizeof(long) == 8 && cpu_has_ebase_wg)
		return read_gc0_ebase_64();
	else
		return read_gc0_ebase();
}

static inline void kvm_vz_write_gc0_ebase(long v)
{
	/*
	 * First write with WG=1 to write upper bits, then write again in case
	 * WG should be left at 0.
	 * write_gc0_ebase_64() is no longer UNDEFINED since R6.
	 */
	if (sizeof(long) == 8 &&
	    (cpu_has_mips64r6 || cpu_has_ebase_wg)) {
		write_gc0_ebase_64(v | MIPS_EBASE_WG);
		write_gc0_ebase_64(v);
	} else {
		write_gc0_ebase(v | MIPS_EBASE_WG);
		write_gc0_ebase(v);
	}
}
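
/*
 * Worked example of the WG dance above (illustrative value, added
 * commentary): to move the guest exception base to 0xffffffff82000000, the
 * first write carries MIPS_EBASE_WG so the upper bits are latched while the
 * write gate is open, and the second write stores the caller's value as-is
 * so WG ends up clear unless the caller explicitly asked for it.
 */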

/*
 * These Config bits may be writable by the guest:
 * Config:	[K23, KU] (!TLB), K0
 * Config1:	(none)
 * Config2:	[TU, SU] (impl)
 * Config3:	ISAOnExc
 * Config4:	FTLBPageSize
 * Config5:	K, CV, MSAEn, UFE, FRE, SBRI, UFR
 */

static inline unsigned int kvm_vz_config_guest_wrmask(struct kvm_vcpu *vcpu)
{
	return CONF_CM_CMASK;
}

static inline unsigned int kvm_vz_config1_guest_wrmask(struct kvm_vcpu *vcpu)
{
	return 0;
}

static inline unsigned int kvm_vz_config2_guest_wrmask(struct kvm_vcpu *vcpu)
{
	return 0;
}

static inline unsigned int kvm_vz_config3_guest_wrmask(struct kvm_vcpu *vcpu)
{
	return MIPS_CONF3_ISA_OE;
}

static inline unsigned int kvm_vz_config4_guest_wrmask(struct kvm_vcpu *vcpu)
{
	/* no need to be exact */
	return MIPS_CONF4_VFTLBPAGESIZE;
}

static inline unsigned int kvm_vz_config5_guest_wrmask(struct kvm_vcpu *vcpu)
{
	unsigned int mask = MIPS_CONF5_K | MIPS_CONF5_CV | MIPS_CONF5_SBRI;

	/* Permit MSAEn changes if MSA supported and enabled */
	if (kvm_mips_guest_has_msa(&vcpu->arch))
		mask |= MIPS_CONF5_MSAEN;

	/*
	 * Permit guest FPU mode changes if FPU is enabled and the relevant
	 * feature exists according to the FIR register.
	 */
	if (kvm_mips_guest_has_fpu(&vcpu->arch)) {
		if (cpu_has_ufr)
			mask |= MIPS_CONF5_UFR;
		if (cpu_has_fre)
			mask |= MIPS_CONF5_FRE | MIPS_CONF5_UFE;
	}

	return mask;
}

/*
 * VZ optionally allows these additional Config bits to be written by root:
 * Config:	M, [MT]
 * Config1:	M, [MMUSize-1, C2, MD, PC, WR, CA], FP
 * Config2:	M
 * Config3:	M, MSAP, [BPG], ULRI, [DSP2P, DSPP], CTXTC, [ITL, LPA, VEIC,
 *		VInt, SP, CDMM, MT, SM, TL]
 * Config4:	M, [VTLBSizeExt, MMUSizeExt]
 * Config5:	MRP
 */

static inline unsigned int kvm_vz_config_user_wrmask(struct kvm_vcpu *vcpu)
{
	return kvm_vz_config_guest_wrmask(vcpu) | MIPS_CONF_M;
}

static inline unsigned int kvm_vz_config1_user_wrmask(struct kvm_vcpu *vcpu)
{
	unsigned int mask = kvm_vz_config1_guest_wrmask(vcpu) | MIPS_CONF_M;

	/* Permit FPU to be present if FPU is supported */
	if (kvm_mips_guest_can_have_fpu(&vcpu->arch))
		mask |= MIPS_CONF1_FP;

	return mask;
}

static inline unsigned int kvm_vz_config2_user_wrmask(struct kvm_vcpu *vcpu)
{
	return kvm_vz_config2_guest_wrmask(vcpu) | MIPS_CONF_M;
}

static inline unsigned int kvm_vz_config3_user_wrmask(struct kvm_vcpu *vcpu)
{
	unsigned int mask = kvm_vz_config3_guest_wrmask(vcpu) | MIPS_CONF_M |
		MIPS_CONF3_ULRI | MIPS_CONF3_CTXTC;

	/* Permit MSA to be present if MSA is supported */
	if (kvm_mips_guest_can_have_msa(&vcpu->arch))
		mask |= MIPS_CONF3_MSA;

	return mask;
}

static inline unsigned int kvm_vz_config4_user_wrmask(struct kvm_vcpu *vcpu)
{
	return kvm_vz_config4_guest_wrmask(vcpu) | MIPS_CONF_M;
}

static inline unsigned int kvm_vz_config5_user_wrmask(struct kvm_vcpu *vcpu)
{
	return kvm_vz_config5_guest_wrmask(vcpu) | MIPS_CONF5_MRP;
}

static gpa_t kvm_vz_gva_to_gpa_cb(gva_t gva)
{
	/* VZ guest has already converted gva to gpa */
	return gva;
}

static void kvm_vz_queue_irq(struct kvm_vcpu *vcpu, unsigned int priority)
{
	set_bit(priority, &vcpu->arch.pending_exceptions);
	clear_bit(priority, &vcpu->arch.pending_exceptions_clr);
}

static void kvm_vz_dequeue_irq(struct kvm_vcpu *vcpu, unsigned int priority)
{
	clear_bit(priority, &vcpu->arch.pending_exceptions);
	set_bit(priority, &vcpu->arch.pending_exceptions_clr);
}

static void kvm_vz_queue_timer_int_cb(struct kvm_vcpu *vcpu)
{
	/*
	 * timer expiry is asynchronous to vcpu execution, therefore defer
	 * guest cp0 accesses
	 */
	kvm_vz_queue_irq(vcpu, MIPS_EXC_INT_TIMER);
}

static void kvm_vz_dequeue_timer_int_cb(struct kvm_vcpu *vcpu)
{
	/*
	 * timer expiry is asynchronous to vcpu execution, therefore defer
	 * guest cp0 accesses
	 */
	kvm_vz_dequeue_irq(vcpu, MIPS_EXC_INT_TIMER);
}

static void kvm_vz_queue_io_int_cb(struct kvm_vcpu *vcpu,
				   struct kvm_mips_interrupt *irq)
{
	int intr = (int)irq->irq;

	/*
	 * interrupts are asynchronous to vcpu execution, therefore defer
	 * guest cp0 accesses
	 */
	switch (intr) {
	case 2:
		kvm_vz_queue_irq(vcpu, MIPS_EXC_INT_IO);
		break;

	case 3:
		kvm_vz_queue_irq(vcpu, MIPS_EXC_INT_IPI_1);
		break;

	case 4:
		kvm_vz_queue_irq(vcpu, MIPS_EXC_INT_IPI_2);
		break;

	default:
		break;
	}
}

static void kvm_vz_dequeue_io_int_cb(struct kvm_vcpu *vcpu,
				     struct kvm_mips_interrupt *irq)
{
	int intr = (int)irq->irq;

	/*
	 * interrupts are asynchronous to vcpu execution, therefore defer
	 * guest cp0 accesses
	 */
	switch (intr) {
	case -2:
		kvm_vz_dequeue_irq(vcpu, MIPS_EXC_INT_IO);
		break;

	case -3:
		kvm_vz_dequeue_irq(vcpu, MIPS_EXC_INT_IPI_1);
		break;

	case -4:
		kvm_vz_dequeue_irq(vcpu, MIPS_EXC_INT_IPI_2);
		break;

	default:
		break;
	}
}

static u32 kvm_vz_priority_to_irq[MIPS_EXC_MAX] = {
	[MIPS_EXC_INT_TIMER] = C_IRQ5,
	[MIPS_EXC_INT_IO]    = C_IRQ0,
	[MIPS_EXC_INT_IPI_1] = C_IRQ1,
	[MIPS_EXC_INT_IPI_2] = C_IRQ2,
};

static int kvm_vz_irq_deliver_cb(struct kvm_vcpu *vcpu, unsigned int priority,
				 u32 cause)
{
	u32 irq = (priority < MIPS_EXC_MAX) ?
		kvm_vz_priority_to_irq[priority] : 0;

	switch (priority) {
	case MIPS_EXC_INT_TIMER:
		set_gc0_cause(C_TI);
		break;

	case MIPS_EXC_INT_IO:
	case MIPS_EXC_INT_IPI_1:
	case MIPS_EXC_INT_IPI_2:
		if (cpu_has_guestctl2)
			set_c0_guestctl2(irq);
		else
			set_gc0_cause(irq);
		break;

	default:
		break;
	}

	clear_bit(priority, &vcpu->arch.pending_exceptions);
	return 1;
}

static int kvm_vz_irq_clear_cb(struct kvm_vcpu *vcpu, unsigned int priority,
			       u32 cause)
{
	u32 irq = (priority < MIPS_EXC_MAX) ?
		kvm_vz_priority_to_irq[priority] : 0;

	switch (priority) {
	case MIPS_EXC_INT_TIMER:
		/*
		 * Call to kvm_write_c0_guest_compare() clears Cause.TI in
		 * kvm_mips_emulate_CP0(). Explicitly clear irq associated with
		 * Cause.IP[IPTI] if GuestCtl2 virtual interrupt register not
		 * supported or if not using GuestCtl2 Hardware Clear.
		 */
		if (cpu_has_guestctl2) {
			if (!(read_c0_guestctl2() & (irq << 14)))
				clear_c0_guestctl2(irq);
		} else {
			clear_gc0_cause(irq);
		}
		break;

	case MIPS_EXC_INT_IO:
	case MIPS_EXC_INT_IPI_1:
	case MIPS_EXC_INT_IPI_2:
		/* Clear GuestCtl2.VIP irq if not using Hardware Clear */
		if (cpu_has_guestctl2) {
			if (!(read_c0_guestctl2() & (irq << 14)))
				clear_c0_guestctl2(irq);
		} else {
			clear_gc0_cause(irq);
		}
		break;

	default:
		break;
	}

	clear_bit(priority, &vcpu->arch.pending_exceptions_clr);
	return 1;
}

/*
 * VZ guest timer handling.
 */

/**
 * kvm_vz_should_use_htimer() - Find whether to use the VZ hard guest timer.
 * @vcpu:	Virtual CPU.
 *
 * Returns:	true if the VZ GTOffset & real guest CP0_Count should be used
 *		instead of software emulation of the guest timer.
 *		false otherwise.
 */
static bool kvm_vz_should_use_htimer(struct kvm_vcpu *vcpu)
{
	if (kvm_mips_count_disabled(vcpu))
		return false;

	/* Chosen frequency must match real frequency */
	if (mips_hpt_frequency != vcpu->arch.count_hz)
		return false;

	/* We don't support a CP0_GTOffset with fewer bits than CP0_Count */
	if (current_cpu_data.gtoffset_mask != 0xffffffff)
		return false;

	return true;
}

/**
 * _kvm_vz_restore_stimer() - Restore soft timer state.
 * @vcpu:	Virtual CPU.
 * @compare:	CP0_Compare register value, restored by caller.
 * @cause:	CP0_Cause register to restore.
 *
 * Restore VZ state relating to the soft timer. The hard timer can be enabled
 * later.
 */
static void _kvm_vz_restore_stimer(struct kvm_vcpu *vcpu, u32 compare,
				   u32 cause)
{
	/*
	 * Avoid spurious counter interrupts by setting Guest CP0_Count to just
	 * after Guest CP0_Compare.
	 */
	write_c0_gtoffset(compare - read_c0_count());

	back_to_back_c0_hazard();
	write_gc0_cause(cause);
}

/**
 * _kvm_vz_restore_htimer() - Restore hard timer state.
 * @vcpu:	Virtual CPU.
 * @compare:	CP0_Compare register value, restored by caller.
 * @cause:	CP0_Cause register to restore.
 *
 * Restore hard timer Guest.Count & Guest.Cause taking care to preserve the
 * value of Guest.CP0_Cause.TI while restoring Guest.CP0_Cause.
 */
static void _kvm_vz_restore_htimer(struct kvm_vcpu *vcpu,
				   u32 compare, u32 cause)
{
	u32 start_count, after_count;
	ktime_t freeze_time;
	unsigned long flags;

	/*
	 * Freeze the soft-timer and sync the guest CP0_Count with it. We do
	 * this with interrupts disabled to avoid latency.
	 */
	local_irq_save(flags);
	freeze_time = kvm_mips_freeze_hrtimer(vcpu, &start_count);
	write_c0_gtoffset(start_count - read_c0_count());
	local_irq_restore(flags);

	/* restore guest CP0_Cause, as TI may already be set */
	back_to_back_c0_hazard();
	write_gc0_cause(cause);

	/*
	 * The above sequence isn't atomic and would result in lost timer
	 * interrupts if we're not careful. Detect if a timer interrupt is due
	 * and assert it.
	 */
	back_to_back_c0_hazard();
	after_count = read_gc0_count();
	if (after_count - start_count > compare - start_count - 1)
		kvm_vz_queue_irq(vcpu, MIPS_EXC_INT_TIMER);
}
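
/*
 * Worked example of the overflow check above (illustrative numbers, added
 * commentary): with start_count = 100 and compare = 110, an interrupt is due
 * once Count reaches 110. If after_count = 112 then after_count - start_count
 * = 12 exceeds compare - start_count - 1 = 9, so the interrupt that fired
 * mid-sequence is queued by hand; if after_count = 109 it does not, and
 * nothing is queued. The unsigned subtractions keep the comparison correct
 * across CP0_Count wraparound. _kvm_vz_save_htimer() below uses the same
 * check between before_count and end_count.
 */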

/**
 * kvm_vz_restore_timer() - Restore timer state.
 * @vcpu:	Virtual CPU.
 *
 * Restore soft timer state from saved context.
 */
static void kvm_vz_restore_timer(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	u32 cause, compare;

	compare = kvm_read_sw_gc0_compare(cop0);
	cause = kvm_read_sw_gc0_cause(cop0);

	write_gc0_compare(compare);
	_kvm_vz_restore_stimer(vcpu, compare, cause);
}

/**
 * kvm_vz_acquire_htimer() - Switch to hard timer state.
 * @vcpu:	Virtual CPU.
 *
 * Restore hard timer state on top of existing soft timer state if possible.
 *
 * Since the hard timer won't remain active over preemption, preemption should
 * be disabled by the caller.
 */
void kvm_vz_acquire_htimer(struct kvm_vcpu *vcpu)
{
	u32 gctl0;

	gctl0 = read_c0_guestctl0();
	if (!(gctl0 & MIPS_GCTL0_GT) && kvm_vz_should_use_htimer(vcpu)) {
		/* enable guest access to hard timer */
		write_c0_guestctl0(gctl0 | MIPS_GCTL0_GT);

		_kvm_vz_restore_htimer(vcpu, read_gc0_compare(),
				       read_gc0_cause());
	}
}

/**
 * _kvm_vz_save_htimer() - Switch to software emulation of guest timer.
 * @vcpu:	Virtual CPU.
 * @out_compare: Pointer to write compare value to.
 * @out_cause:	Pointer to write cause value to.
 *
 * Save VZ guest timer state and switch to software emulation of the guest
 * CP0 timer. The hard timer must already be in use, so preemption should be
 * disabled.
 */
static void _kvm_vz_save_htimer(struct kvm_vcpu *vcpu,
				u32 *out_compare, u32 *out_cause)
{
	u32 cause, compare, before_count, end_count;
	ktime_t before_time;

	compare = read_gc0_compare();
	*out_compare = compare;

	before_time = ktime_get();

	/*
	 * Record the CP0_Count *prior* to saving CP0_Cause, so we have a time
	 * at which no pending timer interrupt is missing.
	 */
	before_count = read_gc0_count();
	back_to_back_c0_hazard();
	cause = read_gc0_cause();
	*out_cause = cause;

	/*
	 * Record a final CP0_Count which we will transfer to the soft-timer.
	 * This is recorded *after* saving CP0_Cause, so we don't get any timer
	 * interrupts from just after the final CP0_Count point.
	 */
	back_to_back_c0_hazard();
	end_count = read_gc0_count();

	/*
	 * The above sequence isn't atomic, so we could miss a timer interrupt
	 * between reading CP0_Cause and end_count. Detect and record any timer
	 * interrupt due between before_count and end_count.
	 */
	if (end_count - before_count > compare - before_count - 1)
		kvm_vz_queue_irq(vcpu, MIPS_EXC_INT_TIMER);

	/*
	 * Restore soft-timer, ignoring a small amount of negative drift due to
	 * delay between freeze_hrtimer and setting CP0_GTOffset.
	 */
	kvm_mips_restore_hrtimer(vcpu, before_time, end_count, -0x10000);
}

/**
 * kvm_vz_save_timer() - Save guest timer state.
 * @vcpu:	Virtual CPU.
 *
 * Save VZ guest timer state and switch to the soft guest timer if the hard
 * timer was in use.
 */
static void kvm_vz_save_timer(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	u32 gctl0, compare, cause;

	gctl0 = read_c0_guestctl0();
	if (gctl0 & MIPS_GCTL0_GT) {
		/* disable guest use of hard timer */
		write_c0_guestctl0(gctl0 & ~MIPS_GCTL0_GT);

		/* save hard timer state */
		_kvm_vz_save_htimer(vcpu, &compare, &cause);
	} else {
		compare = read_gc0_compare();
		cause = read_gc0_cause();
	}

	/* save timer-related state to VCPU context */
	kvm_write_sw_gc0_cause(cop0, cause);
	kvm_write_sw_gc0_compare(cop0, compare);
}

/**
 * kvm_vz_lose_htimer() - Ensure hard guest timer is not in use.
 * @vcpu:	Virtual CPU.
 *
 * Transfers the state of the hard guest timer to the soft guest timer, leaving
 * guest state intact so it can continue to be used with the soft timer.
 */
void kvm_vz_lose_htimer(struct kvm_vcpu *vcpu)
{
	u32 gctl0, compare, cause;

	preempt_disable();
	gctl0 = read_c0_guestctl0();
	if (gctl0 & MIPS_GCTL0_GT) {
		/* disable guest use of timer */
		write_c0_guestctl0(gctl0 & ~MIPS_GCTL0_GT);

		/* switch to soft timer */
		_kvm_vz_save_htimer(vcpu, &compare, &cause);

		/* leave soft timer in usable state */
		_kvm_vz_restore_stimer(vcpu, compare, cause);
	}
	preempt_enable();
}
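
/*
 * Typical lifecycle, sketched for orientation (added commentary, based only
 * on the functions above): kvm_vz_restore_timer() re-arms the soft timer,
 * kvm_vz_acquire_htimer() upgrades it to the hard timer when possible, and
 * kvm_vz_save_timer() / kvm_vz_lose_htimer() demote it again, carrying
 * Compare and Cause.TI state across each switch.
 */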

/**
 * is_eva_access() - Find whether an instruction is an EVA memory accessor.
 * @inst:	32-bit instruction encoding.
 *
 * Finds whether @inst encodes an EVA memory access instruction, which would
 * indicate that emulation of it should access the user mode address space
 * instead of the kernel mode address space. This matters for MUSUK segments
 * which are TLB mapped for user mode but unmapped for kernel mode.
 *
 * Returns:	Whether @inst encodes an EVA accessor instruction.
 */
static bool is_eva_access(union mips_instruction inst)
{
	if (inst.spec3_format.opcode != spec3_op)
		return false;

	switch (inst.spec3_format.func) {
	case lwle_op:
	case lwre_op:
	case cachee_op:
	case sbe_op:
	case she_op:
	case sce_op:
	case swe_op:
	case swle_op:
	case swre_op:
	case prefe_op:
	case lbue_op:
	case lhue_op:
	case lbe_op:
	case lhe_op:
	case lle_op:
	case lwe_op:
		return true;
	default:
		return false;
	}
}

/**
 * is_eva_am_mapped() - Find whether an access mode is mapped.
 * @vcpu:	KVM VCPU state.
 * @am:		3-bit encoded access mode.
 * @eu:		Segment becomes unmapped and uncached when Status.ERL=1.
 *
 * Decode @am to find whether it encodes a mapped segment for the current VCPU
 * state. Where necessary @eu and the actual instruction causing the fault are
 * taken into account to make the decision.
 *
 * Returns:	Whether the VCPU faulted on a TLB mapped address.
 */
static bool is_eva_am_mapped(struct kvm_vcpu *vcpu, unsigned int am, bool eu)
{
	u32 am_lookup;
	int err;

	/*
	 * Interpret access control mode. We assume address errors will already
	 * have been caught by the guest, leaving us with:
	 *       AM        UM   SM   KM   31..24  23..16
	 * UK    0 000               Unm    0       0
	 * MK    1 001               TLB    1
	 * MSK   2 010          TLB  TLB    1
	 * MUSK  3 011     TLB  TLB  TLB    1
	 * MUSUK 4 100     TLB  TLB  Unm    0       1
	 * USK   5 101          Unm  Unm    0       0
	 * -     6 110                      0       0
	 * UUSK  7 111     Unm  Unm  Unm    0       0
	 *
	 * We shift a magic value by AM across the sign bit to find if always
	 * TLB mapped, and if not shift by 8 again to find if it depends on KM.
	 */
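	/*
	 * Worked example (follows from the table above; added commentary):
	 * for AM=1 (MK), 0x70080000 << 1 = 0xe0100000 has bit 31 set, so the
	 * segment is always TLB mapped. For AM=4 (MUSUK), 0x70080000 << 4 =
	 * 0x00800000 leaves bit 31 clear, but shifting a further 8 gives
	 * 0x80000000, so mappedness depends on the kernel mode checks below.
	 */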
	am_lookup = 0x70080000 << am;
	if ((s32)am_lookup < 0) {
		/*
		 * MK, MSK, MUSK
		 * Always TLB mapped, unless SegCtl.EU && ERL
		 */
		if (!eu || !(read_gc0_status() & ST0_ERL))
			return true;
	} else {
		am_lookup <<= 8;
		if ((s32)am_lookup < 0) {
			union mips_instruction inst;
			unsigned int status;
			u32 *opc;

			/*
			 * MUSUK
			 * TLB mapped if not in kernel mode
			 */
			status = read_gc0_status();
			if (!(status & (ST0_EXL | ST0_ERL)) &&
			    (status & ST0_KSU))
				return true;
			/*
			 * EVA access instructions in kernel
			 * mode access user address space.
			 */
			opc = (u32 *)vcpu->arch.pc;
			if (vcpu->arch.host_cp0_cause & CAUSEF_BD)
				opc += 1;
			err = kvm_get_badinstr(opc, vcpu, &inst.word);
			if (!err && is_eva_access(inst))
				return true;
		}
	}

	return false;
}

/**
 * kvm_vz_gva_to_gpa() - Convert valid GVA to GPA.
 * @vcpu:	KVM VCPU state.
 * @gva:	Guest virtual address to convert.
 * @gpa:	Output guest physical address.
 *
 * Convert a guest virtual address (GVA) which is valid according to the guest
 * context, to a guest physical address (GPA).
 *
 * Returns:	0 on success.
 *		-errno on failure.
 */
static int kvm_vz_gva_to_gpa(struct kvm_vcpu *vcpu, unsigned long gva,
			     unsigned long *gpa)
{
	u32 gva32 = gva;
	unsigned long segctl;

	if ((long)gva == (s32)gva32) {
		/* Handle canonical 32-bit virtual address */
		if (cpu_guest_has_segments) {
			unsigned long mask, pa;

			switch (gva32 >> 29) {
			case 0:
			case 1: /* CFG5 (1GB) */
				segctl = read_gc0_segctl2() >> 16;
				mask = (unsigned long)0xfc0000000ull;
				break;
			case 2:
			case 3: /* CFG4 (1GB) */
				segctl = read_gc0_segctl2();
				mask = (unsigned long)0xfc0000000ull;
				break;
			case 4: /* CFG3 (512MB) */
				segctl = read_gc0_segctl1() >> 16;
				mask = (unsigned long)0xfe0000000ull;
				break;
			case 5: /* CFG2 (512MB) */
				segctl = read_gc0_segctl1();
				mask = (unsigned long)0xfe0000000ull;
				break;
			case 6: /* CFG1 (512MB) */
				segctl = read_gc0_segctl0() >> 16;
				mask = (unsigned long)0xfe0000000ull;
				break;
			case 7: /* CFG0 (512MB) */
				segctl = read_gc0_segctl0();
				mask = (unsigned long)0xfe0000000ull;
				break;
			default:
				/*
				 * GCC 4.9 isn't smart enough to figure out
				 * that segctl and mask are always initialised.
				 */
				unreachable();
			}

			if (is_eva_am_mapped(vcpu, (segctl >> 4) & 0x7,
					     segctl & 0x0008))
				goto tlb_mapped;

			/* Unmapped, find guest physical address */
			pa = (segctl << 20) & mask;
			pa |= gva32 & ~mask;
			*gpa = pa;
			return 0;
		} else if ((s32)gva32 < (s32)0xc0000000) {
			/* legacy unmapped KSeg0 or KSeg1 */
			*gpa = gva32 & 0x1fffffff;
			return 0;
		}
#ifdef CONFIG_64BIT
	} else if ((gva & 0xc000000000000000) == 0x8000000000000000) {
		/* XKPHYS */
		if (cpu_guest_has_segments) {
			/*
			 * Each of the 8 regions can be overridden by SegCtl2.XR
			 * to use SegCtl1.XAM.
			 */
			segctl = read_gc0_segctl2();
			if (segctl & (1ull << (56 + ((gva >> 59) & 0x7)))) {
				segctl = read_gc0_segctl1();
				if (is_eva_am_mapped(vcpu, (segctl >> 59) & 0x7,
						     0))
					goto tlb_mapped;
			}
		}
		/*
		 * Traditionally fully unmapped.
		 * Bits 61:59 specify the CCA, which we can just mask off here.
		 * Bits 58:PABITS should be zero, but we shouldn't have got here
		 * if it wasn't.
		 */
		*gpa = gva & 0x07ffffffffffffff;
		return 0;
#endif
	}

tlb_mapped:
	return kvm_vz_guest_tlb_lookup(vcpu, gva, gpa);
}
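
/*
 * Example of the legacy path above (added commentary): without guest
 * segmentation, a KSeg0 GVA such as 0x80001000 is masked with 0x1fffffff to
 * give GPA 0x00001000, and the KSeg1 alias 0xa0001000 yields the same GPA,
 * the two differing only in cacheability.
 */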

/**
 * kvm_vz_badvaddr_to_gpa() - Convert GVA BadVAddr from root exception to GPA.
 * @vcpu:	KVM VCPU state.
 * @badvaddr:	Root BadVAddr.
 * @gpa:	Output guest physical address.
 *
 * VZ implementations are permitted to report guest virtual addresses (GVA) in
 * BadVAddr on a root exception during guest execution, instead of the more
 * convenient guest physical addresses (GPA). When we get a GVA, this function
 * converts it to a GPA, taking into account guest segmentation and guest TLB
 * state.
 *
 * Returns:	0 on success.
 *		-errno on failure.
 */
static int kvm_vz_badvaddr_to_gpa(struct kvm_vcpu *vcpu, unsigned long badvaddr,
				  unsigned long *gpa)
{
	unsigned int gexccode = (vcpu->arch.host_cp0_guestctl0 &
				 MIPS_GCTL0_GEXC) >> MIPS_GCTL0_GEXC_SHIFT;

	/* If BadVAddr is GPA, then all is well in the world */
	if (likely(gexccode == MIPS_GCTL0_GEXC_GPA)) {
		*gpa = badvaddr;
		return 0;
	}

	/* Otherwise we'd expect it to be GVA ... */
	if (WARN(gexccode != MIPS_GCTL0_GEXC_GVA,
		 "Unexpected gexccode %#x\n", gexccode))
		return -EINVAL;

	/* ... and we need to perform the GVA->GPA translation in software */
	return kvm_vz_gva_to_gpa(vcpu, badvaddr, gpa);
}

static int kvm_trap_vz_no_handler(struct kvm_vcpu *vcpu)
{
	u32 *opc = (u32 *) vcpu->arch.pc;
	u32 cause = vcpu->arch.host_cp0_cause;
	u32 exccode = (cause & CAUSEF_EXCCODE) >> CAUSEB_EXCCODE;
	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
	u32 inst = 0;

	/*
	 * Fetch the instruction.
	 */
	if (cause & CAUSEF_BD)
		opc += 1;
	kvm_get_badinstr(opc, vcpu, &inst);

	kvm_err("Exception Code: %d not handled @ PC: %p, inst: 0x%08x BadVaddr: %#lx Status: %#x\n",
		exccode, opc, inst, badvaddr,
		read_gc0_status());
	kvm_arch_vcpu_dump_regs(vcpu);
	vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
	return RESUME_HOST;
}

static unsigned long mips_process_maar(unsigned int op, unsigned long val)
{
	/* Mask off unused bits */
	unsigned long mask = 0xfffff000 | MIPS_MAAR_S | MIPS_MAAR_VL;

	if (read_gc0_pagegrain() & PG_ELPA)
		mask |= 0x00ffffff00000000ull;
	if (cpu_guest_has_mvh)
		mask |= MIPS_MAAR_VH;

	/* Set or clear VH */
	if (op == mtc_op) {
		/* clear VH */
		val &= ~MIPS_MAAR_VH;
	} else if (op == dmtc_op) {
		/* set VH to match VL */
		val &= ~MIPS_MAAR_VH;
		if (val & MIPS_MAAR_VL)
			val |= MIPS_MAAR_VH;
	}

	return val & mask;
}
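
/*
 * Illustrative example (added commentary; exact bit positions per the
 * MIPS_MAAR_* definitions): a 32-bit MTC0 of a value with VL set updates only
 * the low word, so VH is cleared; a DMTC0 of the same value copies VL into VH
 * so the two valid bits stay consistent across the 64-bit register.
 */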

static void kvm_write_maari(struct kvm_vcpu *vcpu, unsigned long val)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;

	val &= MIPS_MAARI_INDEX;
	if (val == MIPS_MAARI_INDEX)
		kvm_write_sw_gc0_maari(cop0, ARRAY_SIZE(vcpu->arch.maar) - 1);
	else if (val < ARRAY_SIZE(vcpu->arch.maar))
		kvm_write_sw_gc0_maari(cop0, val);
}

static enum emulation_result kvm_vz_gpsi_cop0(union mips_instruction inst,
					      u32 *opc, u32 cause,
					      struct kvm_run *run,
					      struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	enum emulation_result er = EMULATE_DONE;
	u32 rt, rd, sel;
	unsigned long curr_pc;
	unsigned long val;

	/*
	 * Update PC and hold onto current PC in case there is
	 * an error and we want to rollback the PC
	 */
	curr_pc = vcpu->arch.pc;
	er = update_pc(vcpu, cause);
	if (er == EMULATE_FAIL)
		return er;

	if (inst.co_format.co) {
		switch (inst.co_format.func) {
		case wait_op:
			er = kvm_mips_emul_wait(vcpu);
			break;
		default:
			er = EMULATE_FAIL;
		}
	} else {
		rt = inst.c0r_format.rt;
		rd = inst.c0r_format.rd;
		sel = inst.c0r_format.sel;

		switch (inst.c0r_format.rs) {
		case dmfc_op:
		case mfc_op:
#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
			cop0->stat[rd][sel]++;
#endif
			if (rd == MIPS_CP0_COUNT &&
			    sel == 0) {			/* Count */
				val = kvm_mips_read_count(vcpu);
			} else if (rd == MIPS_CP0_COMPARE &&
				   sel == 0) {		/* Compare */
				val = read_gc0_compare();
			} else if (rd == MIPS_CP0_LLADDR &&
				   sel == 0) {		/* LLAddr */
				if (cpu_guest_has_rw_llb)
					val = read_gc0_lladdr() &
						MIPS_LLADDR_LLB;
				else
					val = 0;
			} else if (rd == MIPS_CP0_LLADDR &&
				   sel == 1 &&		/* MAAR */
				   cpu_guest_has_maar &&
				   !cpu_guest_has_dyn_maar) {
				/* MAARI must be in range */
				BUG_ON(kvm_read_sw_gc0_maari(cop0) >=
				       ARRAY_SIZE(vcpu->arch.maar));
				val = vcpu->arch.maar[
					kvm_read_sw_gc0_maari(cop0)];
			} else if ((rd == MIPS_CP0_PRID &&
				    (sel == 0 ||	/* PRid */
				     sel == 2 ||	/* CDMMBase */
				     sel == 3)) ||	/* CMGCRBase */
				   (rd == MIPS_CP0_STATUS &&
				    (sel == 2 ||	/* SRSCtl */
				     sel == 3)) ||	/* SRSMap */
				   (rd == MIPS_CP0_CONFIG &&
				    (sel == 7)) ||	/* Config7 */
				   (rd == MIPS_CP0_LLADDR &&
				    (sel == 2) &&	/* MAARI */
				    cpu_guest_has_maar &&
				    !cpu_guest_has_dyn_maar) ||
				   (rd == MIPS_CP0_ERRCTL &&
				    (sel == 0))) {	/* ErrCtl */
				val = cop0->reg[rd][sel];
			} else {
				val = 0;
				er = EMULATE_FAIL;
			}

			if (er != EMULATE_FAIL) {
				/* Sign extend */
				if (inst.c0r_format.rs == mfc_op)
					val = (int)val;
				vcpu->arch.gprs[rt] = val;
			}

			trace_kvm_hwr(vcpu, (inst.c0r_format.rs == mfc_op) ?
					KVM_TRACE_MFC0 : KVM_TRACE_DMFC0,
				      KVM_TRACE_COP0(rd, sel), val);
			break;

		case dmtc_op:
		case mtc_op:
#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
			cop0->stat[rd][sel]++;
#endif
			val = vcpu->arch.gprs[rt];
			trace_kvm_hwr(vcpu, (inst.c0r_format.rs == mtc_op) ?
					KVM_TRACE_MTC0 : KVM_TRACE_DMTC0,
				      KVM_TRACE_COP0(rd, sel), val);

			if (rd == MIPS_CP0_COUNT &&
			    sel == 0) {			/* Count */
				kvm_vz_lose_htimer(vcpu);
				kvm_mips_write_count(vcpu, vcpu->arch.gprs[rt]);
			} else if (rd == MIPS_CP0_COMPARE &&
				   sel == 0) {		/* Compare */
				kvm_mips_write_compare(vcpu,
						       vcpu->arch.gprs[rt],
						       true);
			} else if (rd == MIPS_CP0_LLADDR &&
				   sel == 0) {		/* LLAddr */
				/*
				 * P5600 generates GPSI on guest MTC0 LLAddr.
				 * Only allow the guest to clear LLB.
				 */
				if (cpu_guest_has_rw_llb &&
				    !(val & MIPS_LLADDR_LLB))
					write_gc0_lladdr(0);
			} else if (rd == MIPS_CP0_LLADDR &&
				   sel == 1 &&		/* MAAR */
				   cpu_guest_has_maar &&
				   !cpu_guest_has_dyn_maar) {
				val = mips_process_maar(inst.c0r_format.rs,
							val);

				/* MAARI must be in range */
				BUG_ON(kvm_read_sw_gc0_maari(cop0) >=
				       ARRAY_SIZE(vcpu->arch.maar));
				vcpu->arch.maar[kvm_read_sw_gc0_maari(cop0)] =
									val;
			} else if (rd == MIPS_CP0_LLADDR &&
				   (sel == 2) &&	/* MAARI */
				   cpu_guest_has_maar &&
				   !cpu_guest_has_dyn_maar) {
				kvm_write_maari(vcpu, val);
			} else if (rd == MIPS_CP0_ERRCTL &&
				   (sel == 0)) {	/* ErrCtl */
				/* ignore the written value */
			} else {
				er = EMULATE_FAIL;
			}
			break;

		default:
			er = EMULATE_FAIL;
			break;
		}
	}
	/* Rollback PC only if emulation was unsuccessful */
	if (er == EMULATE_FAIL) {
		kvm_err("[%#lx]%s: unsupported cop0 instruction 0x%08x\n",
			curr_pc, __func__, inst.word);

		vcpu->arch.pc = curr_pc;
	}

	return er;
}
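
/*
 * Example of the path above (added commentary): a guest "mfc0 $t0, $9"
 * (CP0_Count, sel 0) traps to root as a GPSI exception and lands here with
 * rd == MIPS_CP0_COUNT, so the read is satisfied from kvm_mips_read_count()
 * and the guest sees the emulated, monotonic Count value.
 */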

static enum emulation_result kvm_vz_gpsi_cache(union mips_instruction inst,
					       u32 *opc, u32 cause,
					       struct kvm_run *run,
					       struct kvm_vcpu *vcpu)
{
	enum emulation_result er = EMULATE_DONE;
	u32 cache, op_inst, op, base;
	s16 offset;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	unsigned long va, curr_pc;

	/*
	 * Update PC and hold onto current PC in case there is
	 * an error and we want to rollback the PC
	 */
	curr_pc = vcpu->arch.pc;
	er = update_pc(vcpu, cause);
	if (er == EMULATE_FAIL)
		return er;

	base = inst.i_format.rs;
	op_inst = inst.i_format.rt;
	if (cpu_has_mips_r6)
		offset = inst.spec3_format.simmediate;
	else
		offset = inst.i_format.simmediate;
	cache = op_inst & CacheOp_Cache;
	op = op_inst & CacheOp_Op;

	va = arch->gprs[base] + offset;

	kvm_debug("CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x)\n",
		  cache, op, base, arch->gprs[base], offset);

	/* Secondary or tertiary cache ops ignored */
	if (cache != Cache_I && cache != Cache_D)
		return EMULATE_DONE;

	switch (op_inst) {
	case Index_Invalidate_I:
		flush_icache_line_indexed(va);
		return EMULATE_DONE;
	case Index_Writeback_Inv_D:
		flush_dcache_line_indexed(va);
		return EMULATE_DONE;
	default:
		break;
	}

	kvm_err("@ %#lx/%#lx CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x)\n",
		curr_pc, vcpu->arch.gprs[31], cache, op, base, arch->gprs[base],
		offset);
	/* Rollback PC */
	vcpu->arch.pc = curr_pc;

	return EMULATE_FAIL;
}
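
/*
 * Encoding note, worked through (added commentary, assuming the standard
 * CacheOp field layout): the CACHE rt field keeps the cache in bits 1:0 and
 * the op in bits 4:2, so rt == 0b00000 is Index_Invalidate_I (op 0 on the I
 * cache) and rt == 0b00001 is Index_Writeback_Inv_D (op 0 on the D cache);
 * any other I/D op rolls the PC back and fails emulation.
 */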

static enum emulation_result kvm_trap_vz_handle_gpsi(u32 cause, u32 *opc,
						     struct kvm_vcpu *vcpu)
{
	enum emulation_result er = EMULATE_DONE;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	struct kvm_run *run = vcpu->run;
	union mips_instruction inst;
	int rd, rt, sel;
	int err;

	/*
	 * Fetch the instruction.
	 */
	if (cause & CAUSEF_BD)
		opc += 1;
	err = kvm_get_badinstr(opc, vcpu, &inst.word);
	if (err)
		return EMULATE_FAIL;

	switch (inst.r_format.opcode) {
	case cop0_op:
		er = kvm_vz_gpsi_cop0(inst, opc, cause, run, vcpu);
		break;
#ifndef CONFIG_CPU_MIPSR6
	case cache_op:
		trace_kvm_exit(vcpu, KVM_TRACE_EXIT_CACHE);
		er = kvm_vz_gpsi_cache(inst, opc, cause, run, vcpu);
		break;
#endif
	case spec3_op:
		switch (inst.spec3_format.func) {
#ifdef CONFIG_CPU_MIPSR6
		case cache6_op:
			trace_kvm_exit(vcpu, KVM_TRACE_EXIT_CACHE);
			er = kvm_vz_gpsi_cache(inst, opc, cause, run, vcpu);
			break;
#endif
		case rdhwr_op:
			if (inst.r_format.rs || (inst.r_format.re >> 3))
				goto unknown;

			rd = inst.r_format.rd;
			rt = inst.r_format.rt;
			sel = inst.r_format.re & 0x7;

			switch (rd) {
			case MIPS_HWR_CC:	/* Read count register */
				arch->gprs[rt] =
					(long)(int)kvm_mips_read_count(vcpu);
				break;
			default:
				trace_kvm_hwr(vcpu, KVM_TRACE_RDHWR,
					      KVM_TRACE_HWR(rd, sel), 0);
				goto unknown;
			}

			trace_kvm_hwr(vcpu, KVM_TRACE_RDHWR,
				      KVM_TRACE_HWR(rd, sel), arch->gprs[rt]);

			er = update_pc(vcpu, cause);
			break;
		default:
			goto unknown;
		}
		break;
unknown:

	default:
		kvm_err("GPSI exception not supported (%p/%#x)\n",
			opc, inst.word);
		kvm_arch_vcpu_dump_regs(vcpu);
		er = EMULATE_FAIL;
		break;
	}

	return er;
}

static enum emulation_result kvm_trap_vz_handle_gsfc(u32 cause, u32 *opc,
						     struct kvm_vcpu *vcpu)
{
	enum emulation_result er = EMULATE_DONE;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	union mips_instruction inst;
	int err;

	/*
	 * Fetch the instruction.
	 */
	if (cause & CAUSEF_BD)
		opc += 1;
	err = kvm_get_badinstr(opc, vcpu, &inst.word);
	if (err)
		return EMULATE_FAIL;

	/* complete MTC0 on behalf of guest and advance EPC */
	if (inst.c0r_format.opcode == cop0_op &&
	    inst.c0r_format.rs == mtc_op &&
	    inst.c0r_format.z == 0) {
		int rt = inst.c0r_format.rt;
		int rd = inst.c0r_format.rd;
		int sel = inst.c0r_format.sel;
		unsigned int val = arch->gprs[rt];
		unsigned int old_val, change;

		trace_kvm_hwr(vcpu, KVM_TRACE_MTC0, KVM_TRACE_COP0(rd, sel),
			      val);

		if ((rd == MIPS_CP0_STATUS) && (sel == 0)) {
			/* FR bit should read as zero if no FPU */
			if (!kvm_mips_guest_has_fpu(&vcpu->arch))
				val &= ~(ST0_CU1 | ST0_FR);

			/*
			 * Also don't allow FR to be set if host doesn't support
			 * it.
			 */
			if (!(boot_cpu_data.fpu_id & MIPS_FPIR_F64))
				val &= ~ST0_FR;

			old_val = read_gc0_status();
			change = val ^ old_val;

			if (change & ST0_FR) {
				/*
				 * FPU and Vector register state is made
				 * UNPREDICTABLE by a change of FR, so don't
				 * even bother saving it.
				 */
				kvm_drop_fpu(vcpu);
			}

			/*
			 * If MSA state is already live, it is undefined how it
			 * interacts with FR=0 FPU state, and we don't want to
			 * hit reserved instruction exceptions trying to save
			 * the MSA state later when CU=1 && FR=1, so play it
			 * safe and save it first.
			 */
			if (change & ST0_CU1 && !(val & ST0_FR) &&
			    vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA)
				kvm_lose_fpu(vcpu);

			write_gc0_status(val);
		} else if ((rd == MIPS_CP0_CAUSE) && (sel == 0)) {
			u32 old_cause = read_gc0_cause();
			u32 change = old_cause ^ val;

			/* DC bit enabling/disabling timer? */
			if (change & CAUSEF_DC) {
				if (val & CAUSEF_DC) {
					kvm_vz_lose_htimer(vcpu);
					kvm_mips_count_disable_cause(vcpu);
				} else {
					kvm_mips_count_enable_cause(vcpu);
				}
			}

			/* Only certain bits are RW to the guest */
			change &= (CAUSEF_DC | CAUSEF_IV | CAUSEF_WP |
				   CAUSEF_IP0 | CAUSEF_IP1);

			/* WP can only be cleared */
			change &= ~CAUSEF_WP | old_cause;
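			/*
			 * Worked through (added commentary): if old_cause.WP
			 * is 0, ~CAUSEF_WP | old_cause has the WP bit clear,
			 * masking WP out of 'change' so the guest cannot set
			 * it; if old_cause.WP is 1 the bit survives and the
			 * write below may clear it.
			 */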

			write_gc0_cause(old_cause ^ change);
		} else if ((rd == MIPS_CP0_STATUS) && (sel == 1)) { /* IntCtl */
			write_gc0_intctl(val);
		} else if ((rd == MIPS_CP0_CONFIG) && (sel == 5)) {
			old_val = read_gc0_config5();
			change = val ^ old_val;
			/* Handle changes in FPU/MSA modes */
			preempt_disable();

			/*
			 * Propagate FRE changes immediately if the FPU
			 * context is already loaded.
			 */
			if (change & MIPS_CONF5_FRE &&
			    vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU)
				change_c0_config5(MIPS_CONF5_FRE, val);

			preempt_enable();

			val = old_val ^
				(change & kvm_vz_config5_guest_wrmask(vcpu));
			write_gc0_config5(val);
		} else {
			kvm_err("Handle GSFC, unsupported field change @ %p: %#x\n",
				opc, inst.word);
			er = EMULATE_FAIL;
		}

		if (er != EMULATE_FAIL)
			er = update_pc(vcpu, cause);
	} else {
		kvm_err("Handle GSFC, unrecognized instruction @ %p: %#x\n",
			opc, inst.word);
		er = EMULATE_FAIL;
	}

	return er;
}

static enum emulation_result kvm_trap_vz_handle_hc(u32 cause, u32 *opc,
						   struct kvm_vcpu *vcpu)
{
	enum emulation_result er;
	union mips_instruction inst;
	unsigned long curr_pc;
	int err;

	if (cause & CAUSEF_BD)
		opc += 1;
	err = kvm_get_badinstr(opc, vcpu, &inst.word);
	if (err)
		return EMULATE_FAIL;

	/*
	 * Update PC and hold onto current PC in case there is
	 * an error and we want to rollback the PC
	 */
	curr_pc = vcpu->arch.pc;
	er = update_pc(vcpu, cause);
	if (er == EMULATE_FAIL)
		return er;

	er = kvm_mips_emul_hypcall(vcpu, inst);
	if (er == EMULATE_FAIL)
		vcpu->arch.pc = curr_pc;

	return er;
}

static enum emulation_result kvm_trap_vz_no_handler_guest_exit(u32 gexccode,
							       u32 cause,
							       u32 *opc,
							       struct kvm_vcpu *vcpu)
{
	u32 inst;

	/*
	 * Fetch the instruction.
	 */
	if (cause & CAUSEF_BD)
		opc += 1;
	kvm_get_badinstr(opc, vcpu, &inst);

	kvm_err("Guest Exception Code: %d not yet handled @ PC: %p, inst: 0x%08x Status: %#x\n",
		gexccode, opc, inst, read_gc0_status());

	return EMULATE_FAIL;
}

static int kvm_trap_vz_handle_guest_exit(struct kvm_vcpu *vcpu)
{
	u32 *opc = (u32 *) vcpu->arch.pc;
	u32 cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	u32 gexccode = (vcpu->arch.host_cp0_guestctl0 &
			MIPS_GCTL0_GEXC) >> MIPS_GCTL0_GEXC_SHIFT;
	int ret = RESUME_GUEST;

	trace_kvm_exit(vcpu, KVM_TRACE_EXIT_GEXCCODE_BASE + gexccode);
	switch (gexccode) {
	case MIPS_GCTL0_GEXC_GPSI:
		++vcpu->stat.vz_gpsi_exits;
		er = kvm_trap_vz_handle_gpsi(cause, opc, vcpu);
		break;
	case MIPS_GCTL0_GEXC_GSFC:
		++vcpu->stat.vz_gsfc_exits;
		er = kvm_trap_vz_handle_gsfc(cause, opc, vcpu);
		break;
	case MIPS_GCTL0_GEXC_HC:
		++vcpu->stat.vz_hc_exits;
		er = kvm_trap_vz_handle_hc(cause, opc, vcpu);
		break;
	case MIPS_GCTL0_GEXC_GRR:
		++vcpu->stat.vz_grr_exits;
		er = kvm_trap_vz_no_handler_guest_exit(gexccode, cause, opc,
						       vcpu);
		break;
	case MIPS_GCTL0_GEXC_GVA:
		++vcpu->stat.vz_gva_exits;
		er = kvm_trap_vz_no_handler_guest_exit(gexccode, cause, opc,
						       vcpu);
		break;
	case MIPS_GCTL0_GEXC_GHFC:
		++vcpu->stat.vz_ghfc_exits;
		er = kvm_trap_vz_no_handler_guest_exit(gexccode, cause, opc,
						       vcpu);
		break;
	case MIPS_GCTL0_GEXC_GPA:
		++vcpu->stat.vz_gpa_exits;
		er = kvm_trap_vz_no_handler_guest_exit(gexccode, cause, opc,
						       vcpu);
		break;
	default:
		++vcpu->stat.vz_resvd_exits;
		er = kvm_trap_vz_no_handler_guest_exit(gexccode, cause, opc,
						       vcpu);
		break;
	}

	if (er == EMULATE_DONE) {
		ret = RESUME_GUEST;
	} else if (er == EMULATE_HYPERCALL) {
		ret = kvm_mips_handle_hypcall(vcpu);
	} else {
		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}

/**
 * kvm_trap_vz_handle_cop_unusable() - Guest used unusable coprocessor.
 * @vcpu:	Virtual CPU context.
 *
 * Handle when the guest attempts to use a coprocessor which hasn't been
 * allowed by the root context.
 */
static int kvm_trap_vz_handle_cop_unusable(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	u32 cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_FAIL;
	int ret = RESUME_GUEST;

	if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 1) {
		/*
		 * If guest FPU not present, the FPU operation should have been
		 * treated as a reserved instruction!
		 * If FPU already in use, we shouldn't get this at all.
		 */
		if (WARN_ON(!kvm_mips_guest_has_fpu(&vcpu->arch) ||
			    vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU)) {
			preempt_enable();
			return EMULATE_FAIL;
		}

		kvm_own_fpu(vcpu);
		er = EMULATE_DONE;
	}
	/* other coprocessors not handled */

	switch (er) {
	case EMULATE_DONE:
		ret = RESUME_GUEST;
		break;

	case EMULATE_FAIL:
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
		break;

	default:
		BUG();
	}
	return ret;
}

/**
 * kvm_trap_vz_handle_msa_disabled() - Guest used MSA while disabled in root.
 * @vcpu:	Virtual CPU context.
 *
 * Handle when the guest attempts to use MSA when it is disabled in the root
 * context.
 */
static int kvm_trap_vz_handle_msa_disabled(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;

	/*
	 * If MSA not present or not exposed to guest or FR=0, the MSA operation
	 * should have been treated as a reserved instruction!
	 * Same if CU1=1, FR=0.
	 * If MSA already in use, we shouldn't get this at all.
	 */
	if (!kvm_mips_guest_has_msa(&vcpu->arch) ||
	    (read_gc0_status() & (ST0_CU1 | ST0_FR)) == ST0_CU1 ||
	    !(read_gc0_config5() & MIPS_CONF5_MSAEN) ||
	    vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) {
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		return RESUME_HOST;
	}
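
	/*
	 * Decoding the Status test above (added commentary): masking Status
	 * with ST0_CU1 | ST0_FR and comparing against ST0_CU1 is true exactly
	 * when CU1=1 and FR=0, the FPU configuration in which MSA instructions
	 * are architecturally reserved rather than merely disabled.
	 */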
	kvm_own_msa(vcpu);

	return RESUME_GUEST;
}

static int kvm_trap_vz_handle_tlb_ld_miss(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	u32 *opc = (u32 *) vcpu->arch.pc;
	u32 cause = vcpu->arch.host_cp0_cause;
	ulong badvaddr = vcpu->arch.host_cp0_badvaddr;
	union mips_instruction inst;
	enum emulation_result er = EMULATE_DONE;
	int err, ret = RESUME_GUEST;

	if (kvm_mips_handle_vz_root_tlb_fault(badvaddr, vcpu, false)) {
		/* A code fetch fault doesn't count as an MMIO */
		if (kvm_is_ifetch_fault(&vcpu->arch)) {
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			return RESUME_HOST;
		}

		/* Fetch the instruction */
		if (cause & CAUSEF_BD)
			opc += 1;
		err = kvm_get_badinstr(opc, vcpu, &inst.word);
		if (err) {
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			return RESUME_HOST;
		}

		/* Treat as MMIO */
		er = kvm_mips_emulate_load(inst, cause, run, vcpu);
		if (er == EMULATE_FAIL) {
			kvm_err("Guest Emulate Load from MMIO space failed: PC: %p, BadVaddr: %#lx\n",
				opc, badvaddr);
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		}
	}

	if (er == EMULATE_DONE) {
		ret = RESUME_GUEST;
	} else if (er == EMULATE_DO_MMIO) {
		run->exit_reason = KVM_EXIT_MMIO;
		ret = RESUME_HOST;
	} else {
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}

static int kvm_trap_vz_handle_tlb_st_miss(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	u32 *opc = (u32 *) vcpu->arch.pc;
	u32 cause = vcpu->arch.host_cp0_cause;
	ulong badvaddr = vcpu->arch.host_cp0_badvaddr;
	union mips_instruction inst;
	enum emulation_result er = EMULATE_DONE;
	int err;
	int ret = RESUME_GUEST;

	/* Just try the access again if we couldn't do the translation */
	if (kvm_vz_badvaddr_to_gpa(vcpu, badvaddr, &badvaddr))
		return RESUME_GUEST;
	vcpu->arch.host_cp0_badvaddr = badvaddr;

	if (kvm_mips_handle_vz_root_tlb_fault(badvaddr, vcpu, true)) {
		/* Fetch the instruction */
		if (cause & CAUSEF_BD)
			opc += 1;
		err = kvm_get_badinstr(opc, vcpu, &inst.word);
		if (err) {
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			return RESUME_HOST;
		}

		/* Treat as MMIO */
		er = kvm_mips_emulate_store(inst, cause, run, vcpu);
		if (er == EMULATE_FAIL) {
			kvm_err("Guest Emulate Store to MMIO space failed: PC: %p, BadVaddr: %#lx\n",
				opc, badvaddr);
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		}
	}

	if (er == EMULATE_DONE) {
		ret = RESUME_GUEST;
	} else if (er == EMULATE_DO_MMIO) {
		run->exit_reason = KVM_EXIT_MMIO;
		ret = RESUME_HOST;
	} else {
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}

static u64 kvm_vz_get_one_regs[] = {
	KVM_REG_MIPS_CP0_INDEX,
	KVM_REG_MIPS_CP0_ENTRYLO0,
	KVM_REG_MIPS_CP0_ENTRYLO1,
	KVM_REG_MIPS_CP0_CONTEXT,
	KVM_REG_MIPS_CP0_PAGEMASK,
	KVM_REG_MIPS_CP0_PAGEGRAIN,
	KVM_REG_MIPS_CP0_WIRED,
	KVM_REG_MIPS_CP0_HWRENA,
	KVM_REG_MIPS_CP0_BADVADDR,
	KVM_REG_MIPS_CP0_COUNT,
	KVM_REG_MIPS_CP0_ENTRYHI,
	KVM_REG_MIPS_CP0_COMPARE,
	KVM_REG_MIPS_CP0_STATUS,
	KVM_REG_MIPS_CP0_INTCTL,
	KVM_REG_MIPS_CP0_CAUSE,
	KVM_REG_MIPS_CP0_EPC,
	KVM_REG_MIPS_CP0_PRID,
	KVM_REG_MIPS_CP0_EBASE,
	KVM_REG_MIPS_CP0_CONFIG,
	KVM_REG_MIPS_CP0_CONFIG1,
	KVM_REG_MIPS_CP0_CONFIG2,
	KVM_REG_MIPS_CP0_CONFIG3,
	KVM_REG_MIPS_CP0_CONFIG4,
	KVM_REG_MIPS_CP0_CONFIG5,
#ifdef CONFIG_64BIT
	KVM_REG_MIPS_CP0_XCONTEXT,
#endif
	KVM_REG_MIPS_CP0_ERROREPC,

	KVM_REG_MIPS_COUNT_CTL,
	KVM_REG_MIPS_COUNT_RESUME,
	KVM_REG_MIPS_COUNT_HZ,
};

static u64 kvm_vz_get_one_regs_contextconfig[] = {
	KVM_REG_MIPS_CP0_CONTEXTCONFIG,
#ifdef CONFIG_64BIT
	KVM_REG_MIPS_CP0_XCONTEXTCONFIG,
#endif
};

static u64 kvm_vz_get_one_regs_segments[] = {
	KVM_REG_MIPS_CP0_SEGCTL0,
	KVM_REG_MIPS_CP0_SEGCTL1,
	KVM_REG_MIPS_CP0_SEGCTL2,
};

static u64 kvm_vz_get_one_regs_htw[] = {
	KVM_REG_MIPS_CP0_PWBASE,
	KVM_REG_MIPS_CP0_PWFIELD,
	KVM_REG_MIPS_CP0_PWSIZE,
	KVM_REG_MIPS_CP0_PWCTL,
};

static u64 kvm_vz_get_one_regs_kscratch[] = {
	KVM_REG_MIPS_CP0_KSCRATCH1,
	KVM_REG_MIPS_CP0_KSCRATCH2,
	KVM_REG_MIPS_CP0_KSCRATCH3,
	KVM_REG_MIPS_CP0_KSCRATCH4,
	KVM_REG_MIPS_CP0_KSCRATCH5,
	KVM_REG_MIPS_CP0_KSCRATCH6,
};

static unsigned long kvm_vz_num_regs(struct kvm_vcpu *vcpu)
{
	unsigned long ret;

	ret = ARRAY_SIZE(kvm_vz_get_one_regs);
	if (cpu_guest_has_userlocal)
		++ret;
	if (cpu_guest_has_badinstr)
		++ret;
	if (cpu_guest_has_badinstrp)
		++ret;
	if (cpu_guest_has_contextconfig)
		ret += ARRAY_SIZE(kvm_vz_get_one_regs_contextconfig);
	if (cpu_guest_has_segments)
		ret += ARRAY_SIZE(kvm_vz_get_one_regs_segments);
	if (cpu_guest_has_htw)
		ret += ARRAY_SIZE(kvm_vz_get_one_regs_htw);
	if (cpu_guest_has_maar && !cpu_guest_has_dyn_maar)
		ret += 1 + ARRAY_SIZE(vcpu->arch.maar);
	ret += __arch_hweight8(cpu_data[0].guest.kscratch_mask);

	return ret;
}

static int kvm_vz_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices)
{
	u64 index;
	unsigned int i;

	if (copy_to_user(indices, kvm_vz_get_one_regs,
			 sizeof(kvm_vz_get_one_regs)))
		return -EFAULT;
	indices += ARRAY_SIZE(kvm_vz_get_one_regs);

	if (cpu_guest_has_userlocal) {
		index = KVM_REG_MIPS_CP0_USERLOCAL;
		if (copy_to_user(indices, &index, sizeof(index)))
			return -EFAULT;
		++indices;
	}
	if (cpu_guest_has_badinstr) {
		index = KVM_REG_MIPS_CP0_BADINSTR;
		if (copy_to_user(indices, &index, sizeof(index)))
			return -EFAULT;
		++indices;
	}
	if (cpu_guest_has_badinstrp) {
		index = KVM_REG_MIPS_CP0_BADINSTRP;
		if (copy_to_user(indices, &index, sizeof(index)))
			return -EFAULT;
		++indices;
	}
	if (cpu_guest_has_contextconfig) {
		if (copy_to_user(indices, kvm_vz_get_one_regs_contextconfig,
				 sizeof(kvm_vz_get_one_regs_contextconfig)))
			return -EFAULT;
		indices += ARRAY_SIZE(kvm_vz_get_one_regs_contextconfig);
	}
	if (cpu_guest_has_segments) {
		if (copy_to_user(indices, kvm_vz_get_one_regs_segments,
				 sizeof(kvm_vz_get_one_regs_segments)))
			return -EFAULT;
		indices += ARRAY_SIZE(kvm_vz_get_one_regs_segments);
	}
	if (cpu_guest_has_htw) {
		if (copy_to_user(indices, kvm_vz_get_one_regs_htw,
				 sizeof(kvm_vz_get_one_regs_htw)))
			return -EFAULT;
		indices += ARRAY_SIZE(kvm_vz_get_one_regs_htw);
	}
	if (cpu_guest_has_maar && !cpu_guest_has_dyn_maar) {
		for (i = 0; i < ARRAY_SIZE(vcpu->arch.maar); ++i) {
			index = KVM_REG_MIPS_CP0_MAAR(i);
			if (copy_to_user(indices, &index, sizeof(index)))
				return -EFAULT;
			++indices;
		}

		index = KVM_REG_MIPS_CP0_MAARI;
		if (copy_to_user(indices, &index, sizeof(index)))
			return -EFAULT;
		++indices;
	}
	for (i = 0; i < 6; ++i) {
		if (!cpu_guest_has_kscr(i + 2))
			continue;

		if (copy_to_user(indices, &kvm_vz_get_one_regs_kscratch[i],
				 sizeof(kvm_vz_get_one_regs_kscratch[i])))
			return -EFAULT;
		++indices;
	}

	return 0;
}

static inline s64 entrylo_kvm_to_user(unsigned long v)
{
	s64 mask, ret = v;

	if (BITS_PER_LONG == 32) {
		/*
		 * KVM API exposes 64-bit version of the register, so move the
		 * RI/XI bits up into place.
		 */
		mask = MIPS_ENTRYLO_RI | MIPS_ENTRYLO_XI;
		ret &= ~mask;
		ret |= ((s64)v & mask) << 32;
	}
	return ret;
}

static inline unsigned long entrylo_user_to_kvm(s64 v)
{
	unsigned long mask, ret = v;

	if (BITS_PER_LONG == 32) {
		/*
		 * KVM API exposes 64-bit version of the register, so move the
		 * RI/XI bits down into place.
		 */
		mask = MIPS_ENTRYLO_RI | MIPS_ENTRYLO_XI;
		ret &= ~mask;
		ret |= (v >> 32) & mask;
	}
	return ret;
}
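
/*
 * Worked example for the 32-bit case above (added commentary, taking RI as
 * bit 31 and XI as bit 30 of the native EntryLo): a kernel value of
 * 0xc0000007 is presented to userspace as 0xc000000000000007, with RI/XI
 * moved up to bits 63:62, and entrylo_user_to_kvm() shifts them back down.
 */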

static int kvm_vz_get_one_reg(struct kvm_vcpu *vcpu,
			      const struct kvm_one_reg *reg,
			      s64 *v)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	unsigned int idx;

	switch (reg->id) {
	case KVM_REG_MIPS_CP0_INDEX:
		*v = (long)read_gc0_index();
		break;
	case KVM_REG_MIPS_CP0_ENTRYLO0:
		*v = entrylo_kvm_to_user(read_gc0_entrylo0());
		break;
	case KVM_REG_MIPS_CP0_ENTRYLO1:
		*v = entrylo_kvm_to_user(read_gc0_entrylo1());
		break;
	case KVM_REG_MIPS_CP0_CONTEXT:
		*v = (long)read_gc0_context();
		break;
	case KVM_REG_MIPS_CP0_CONTEXTCONFIG:
		if (!cpu_guest_has_contextconfig)
			return -EINVAL;
		*v = read_gc0_contextconfig();
		break;
	case KVM_REG_MIPS_CP0_USERLOCAL:
		if (!cpu_guest_has_userlocal)
			return -EINVAL;
		*v = read_gc0_userlocal();
		break;
#ifdef CONFIG_64BIT
	case KVM_REG_MIPS_CP0_XCONTEXTCONFIG:
		if (!cpu_guest_has_contextconfig)
			return -EINVAL;
		*v = read_gc0_xcontextconfig();
		break;
#endif
	case KVM_REG_MIPS_CP0_PAGEMASK:
		*v = (long)read_gc0_pagemask();
		break;
	case KVM_REG_MIPS_CP0_PAGEGRAIN:
		*v = (long)read_gc0_pagegrain();
		break;
	case KVM_REG_MIPS_CP0_SEGCTL0:
		if (!cpu_guest_has_segments)
			return -EINVAL;
		*v = read_gc0_segctl0();
		break;
	case KVM_REG_MIPS_CP0_SEGCTL1:
		if (!cpu_guest_has_segments)
			return -EINVAL;
		*v = read_gc0_segctl1();
		break;
	case KVM_REG_MIPS_CP0_SEGCTL2:
		if (!cpu_guest_has_segments)
			return -EINVAL;
		*v = read_gc0_segctl2();
		break;
	case KVM_REG_MIPS_CP0_PWBASE:
		if (!cpu_guest_has_htw)
			return -EINVAL;
		*v = read_gc0_pwbase();
		break;
	case KVM_REG_MIPS_CP0_PWFIELD:
		if (!cpu_guest_has_htw)
			return -EINVAL;
		*v = read_gc0_pwfield();
		break;
	case KVM_REG_MIPS_CP0_PWSIZE:
		if (!cpu_guest_has_htw)
			return -EINVAL;
		*v = read_gc0_pwsize();
		break;
	case KVM_REG_MIPS_CP0_WIRED:
		*v = (long)read_gc0_wired();
		break;
	case KVM_REG_MIPS_CP0_PWCTL:
		if (!cpu_guest_has_htw)
			return -EINVAL;
		*v = read_gc0_pwctl();
		break;
	case KVM_REG_MIPS_CP0_HWRENA:
		*v = (long)read_gc0_hwrena();
		break;
	case KVM_REG_MIPS_CP0_BADVADDR:
		*v = (long)read_gc0_badvaddr();
		break;
	case KVM_REG_MIPS_CP0_BADINSTR:
		if (!cpu_guest_has_badinstr)
			return -EINVAL;
		*v = read_gc0_badinstr();
		break;
	case KVM_REG_MIPS_CP0_BADINSTRP:
		if (!cpu_guest_has_badinstrp)
			return -EINVAL;
		*v = read_gc0_badinstrp();
		break;
	case KVM_REG_MIPS_CP0_COUNT:
		*v = kvm_mips_read_count(vcpu);
		break;
	case KVM_REG_MIPS_CP0_ENTRYHI:
		*v = (long)read_gc0_entryhi();
		break;
	case KVM_REG_MIPS_CP0_COMPARE:
		*v = (long)read_gc0_compare();
		break;
	case KVM_REG_MIPS_CP0_STATUS:
		*v = (long)read_gc0_status();
		break;
	case KVM_REG_MIPS_CP0_INTCTL:
		*v = read_gc0_intctl();
		break;
	case KVM_REG_MIPS_CP0_CAUSE:
		*v = (long)read_gc0_cause();
		break;
	case KVM_REG_MIPS_CP0_EPC:
		*v = (long)read_gc0_epc();
		break;
	case KVM_REG_MIPS_CP0_PRID:
		*v = (long)kvm_read_c0_guest_prid(cop0);
		break;
	case KVM_REG_MIPS_CP0_EBASE:
		*v = kvm_vz_read_gc0_ebase();
		break;
	case KVM_REG_MIPS_CP0_CONFIG:
		*v = read_gc0_config();
		break;
	case KVM_REG_MIPS_CP0_CONFIG1:
		if (!cpu_guest_has_conf1)
			return -EINVAL;
		*v = read_gc0_config1();
		break;
	case KVM_REG_MIPS_CP0_CONFIG2:
		if (!cpu_guest_has_conf2)
			return -EINVAL;
		*v = read_gc0_config2();
		break;
	case KVM_REG_MIPS_CP0_CONFIG3:
		if (!cpu_guest_has_conf3)
			return -EINVAL;
		*v = read_gc0_config3();
		break;
	case KVM_REG_MIPS_CP0_CONFIG4:
		if (!cpu_guest_has_conf4)
			return -EINVAL;
		*v = read_gc0_config4();
		break;
	case KVM_REG_MIPS_CP0_CONFIG5:
		if (!cpu_guest_has_conf5)
			return -EINVAL;
		*v = read_gc0_config5();
		break;
	case KVM_REG_MIPS_CP0_MAAR(0) ... KVM_REG_MIPS_CP0_MAAR(0x3f):
		if (!cpu_guest_has_maar || cpu_guest_has_dyn_maar)
			return -EINVAL;
		idx = reg->id - KVM_REG_MIPS_CP0_MAAR(0);
		if (idx >= ARRAY_SIZE(vcpu->arch.maar))
			return -EINVAL;
		*v = vcpu->arch.maar[idx];
		break;
	case KVM_REG_MIPS_CP0_MAARI:
		if (!cpu_guest_has_maar || cpu_guest_has_dyn_maar)
			return -EINVAL;
		*v = kvm_read_sw_gc0_maari(vcpu->arch.cop0);
		break;
#ifdef CONFIG_64BIT
	case KVM_REG_MIPS_CP0_XCONTEXT:
		*v = read_gc0_xcontext();
		break;
#endif
	case KVM_REG_MIPS_CP0_ERROREPC:
		*v = (long)read_gc0_errorepc();
		break;
	case KVM_REG_MIPS_CP0_KSCRATCH1 ... KVM_REG_MIPS_CP0_KSCRATCH6:
		idx = reg->id - KVM_REG_MIPS_CP0_KSCRATCH1 + 2;
		if (!cpu_guest_has_kscr(idx))
			return -EINVAL;
		switch (idx) {
		case 2:
			*v = (long)read_gc0_kscratch1();
			break;
		case 3:
			*v = (long)read_gc0_kscratch2();
			break;
		case 4:
			*v = (long)read_gc0_kscratch3();
			break;
		case 5:
			*v = (long)read_gc0_kscratch4();
			break;
		case 6:
			*v = (long)read_gc0_kscratch5();
			break;
		case 7:
			*v = (long)read_gc0_kscratch6();
			break;
		}
		break;
	case KVM_REG_MIPS_COUNT_CTL:
		*v = vcpu->arch.count_ctl;
		break;
	case KVM_REG_MIPS_COUNT_RESUME:
		*v = ktime_to_ns(vcpu->arch.count_resume);
		break;
	case KVM_REG_MIPS_COUNT_HZ:
		*v = vcpu->arch.count_hz;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
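
/*
 * Userspace view (sketch, added commentary): these values are exchanged via
 * the standard KVM_GET_ONE_REG/KVM_SET_ONE_REG vcpu ioctls, e.g.
 *
 *	__u64 val;
 *	struct kvm_one_reg reg = {
 *		.id   = KVM_REG_MIPS_CP0_COUNT,
 *		.addr = (__u64)(unsigned long)&val,
 *	};
 *	ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
 */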

static int kvm_vz_set_one_reg(struct kvm_vcpu *vcpu,
			      const struct kvm_one_reg *reg,
			      s64 v)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	unsigned int idx;
	int ret = 0;
	unsigned int cur, change;

	switch (reg->id) {
	case KVM_REG_MIPS_CP0_INDEX:
		write_gc0_index(v);
		break;
	case KVM_REG_MIPS_CP0_ENTRYLO0:
		write_gc0_entrylo0(entrylo_user_to_kvm(v));
		break;
	case KVM_REG_MIPS_CP0_ENTRYLO1:
		write_gc0_entrylo1(entrylo_user_to_kvm(v));
		break;
	case KVM_REG_MIPS_CP0_CONTEXT:
		write_gc0_context(v);
		break;
	case KVM_REG_MIPS_CP0_CONTEXTCONFIG:
		if (!cpu_guest_has_contextconfig)
			return -EINVAL;
		write_gc0_contextconfig(v);
		break;
	case KVM_REG_MIPS_CP0_USERLOCAL:
		if (!cpu_guest_has_userlocal)
			return -EINVAL;
		write_gc0_userlocal(v);
		break;
#ifdef CONFIG_64BIT
	case KVM_REG_MIPS_CP0_XCONTEXTCONFIG:
		if (!cpu_guest_has_contextconfig)
			return -EINVAL;
		write_gc0_xcontextconfig(v);
		break;
#endif
	case KVM_REG_MIPS_CP0_PAGEMASK:
		write_gc0_pagemask(v);
		break;
	case KVM_REG_MIPS_CP0_PAGEGRAIN:
		write_gc0_pagegrain(v);
		break;
	case KVM_REG_MIPS_CP0_SEGCTL0:
		if (!cpu_guest_has_segments)
			return -EINVAL;
		write_gc0_segctl0(v);
		break;
	case KVM_REG_MIPS_CP0_SEGCTL1:
		if (!cpu_guest_has_segments)
			return -EINVAL;
		write_gc0_segctl1(v);
		break;
	case KVM_REG_MIPS_CP0_SEGCTL2:
		if (!cpu_guest_has_segments)
			return -EINVAL;
		write_gc0_segctl2(v);
		break;
	case KVM_REG_MIPS_CP0_PWBASE:
		if (!cpu_guest_has_htw)
			return -EINVAL;
		write_gc0_pwbase(v);
		break;
	case KVM_REG_MIPS_CP0_PWFIELD:
		if (!cpu_guest_has_htw)
			return -EINVAL;
		write_gc0_pwfield(v);
		break;
	case KVM_REG_MIPS_CP0_PWSIZE:
		if (!cpu_guest_has_htw)
			return -EINVAL;
		write_gc0_pwsize(v);
		break;
	case KVM_REG_MIPS_CP0_WIRED:
		change_gc0_wired(MIPSR6_WIRED_WIRED, v);
		break;
	case KVM_REG_MIPS_CP0_PWCTL:
		if (!cpu_guest_has_htw)
			return -EINVAL;
		write_gc0_pwctl(v);
		break;
	case KVM_REG_MIPS_CP0_HWRENA:
		write_gc0_hwrena(v);
		break;
	case KVM_REG_MIPS_CP0_BADVADDR:
		write_gc0_badvaddr(v);
		break;
	case KVM_REG_MIPS_CP0_BADINSTR:
		if (!cpu_guest_has_badinstr)
			return -EINVAL;
		write_gc0_badinstr(v);
		break;
	case KVM_REG_MIPS_CP0_BADINSTRP:
		if (!cpu_guest_has_badinstrp)
			return -EINVAL;
		write_gc0_badinstrp(v);
		break;
	case KVM_REG_MIPS_CP0_COUNT:
	case KVM_REG_MIPS_CP0_COUNT:
		kvm_mips_write_count(vcpu, v);
		break;
	case KVM_REG_MIPS_CP0_ENTRYHI:
		write_gc0_entryhi(v);
		break;
	case KVM_REG_MIPS_CP0_COMPARE:
		kvm_mips_write_compare(vcpu, v, false);
		break;
	case KVM_REG_MIPS_CP0_STATUS:
		write_gc0_status(v);
		break;
	case KVM_REG_MIPS_CP0_INTCTL:
		write_gc0_intctl(v);
		break;
	case KVM_REG_MIPS_CP0_CAUSE:
		/*
		 * If the timer is stopped or started (DC bit) it must look
		 * atomic with changes to the timer interrupt pending bit (TI).
		 * A timer interrupt should not happen in between.
		 */
		if ((read_gc0_cause() ^ v) & CAUSEF_DC) {
			if (v & CAUSEF_DC) {
				/* disable timer first */
				kvm_mips_count_disable_cause(vcpu);
				change_gc0_cause((u32)~CAUSEF_DC, v);
			} else {
				/* enable timer last */
				change_gc0_cause((u32)~CAUSEF_DC, v);
				kvm_mips_count_enable_cause(vcpu);
			}
		} else {
			write_gc0_cause(v);
		}
		break;
	case KVM_REG_MIPS_CP0_EPC:
		write_gc0_epc(v);
		break;
	case KVM_REG_MIPS_CP0_PRID:
		kvm_write_c0_guest_prid(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_EBASE:
		kvm_vz_write_gc0_ebase(v);
		break;
	case KVM_REG_MIPS_CP0_CONFIG:
		cur = read_gc0_config();
		change = (cur ^ v) & kvm_vz_config_user_wrmask(vcpu);
		if (change) {
			v = cur ^ change;
			write_gc0_config(v);
		}
		break;
	case KVM_REG_MIPS_CP0_CONFIG1:
		if (!cpu_guest_has_conf1)
			break;
		cur = read_gc0_config1();
		change = (cur ^ v) & kvm_vz_config1_user_wrmask(vcpu);
		if (change) {
			v = cur ^ change;
			write_gc0_config1(v);
		}
		break;
	case KVM_REG_MIPS_CP0_CONFIG2:
		if (!cpu_guest_has_conf2)
			break;
		cur = read_gc0_config2();
		change = (cur ^ v) & kvm_vz_config2_user_wrmask(vcpu);
		if (change) {
			v = cur ^ change;
			write_gc0_config2(v);
		}
		break;
	case KVM_REG_MIPS_CP0_CONFIG3:
		if (!cpu_guest_has_conf3)
			break;
		cur = read_gc0_config3();
		change = (cur ^ v) & kvm_vz_config3_user_wrmask(vcpu);
		if (change) {
			v = cur ^ change;
			write_gc0_config3(v);
		}
		break;
	case KVM_REG_MIPS_CP0_CONFIG4:
		if (!cpu_guest_has_conf4)
			break;
		cur = read_gc0_config4();
		change = (cur ^ v) & kvm_vz_config4_user_wrmask(vcpu);
		if (change) {
			v = cur ^ change;
			write_gc0_config4(v);
		}
		break;
	case KVM_REG_MIPS_CP0_CONFIG5:
		if (!cpu_guest_has_conf5)
			break;
		cur = read_gc0_config5();
		change = (cur ^ v) & kvm_vz_config5_user_wrmask(vcpu);
		if (change) {
			v = cur ^ change;
			write_gc0_config5(v);
		}
		break;
	case KVM_REG_MIPS_CP0_MAAR(0) ... KVM_REG_MIPS_CP0_MAAR(0x3f):
		if (!cpu_guest_has_maar || cpu_guest_has_dyn_maar)
			return -EINVAL;
		idx = reg->id - KVM_REG_MIPS_CP0_MAAR(0);
		if (idx >= ARRAY_SIZE(vcpu->arch.maar))
			return -EINVAL;
		vcpu->arch.maar[idx] = mips_process_maar(dmtc_op, v);
		break;
	case KVM_REG_MIPS_CP0_MAARI:
		if (!cpu_guest_has_maar || cpu_guest_has_dyn_maar)
			return -EINVAL;
		kvm_write_maari(vcpu, v);
		break;
#ifdef CONFIG_64BIT
	case KVM_REG_MIPS_CP0_XCONTEXT:
		write_gc0_xcontext(v);
		break;
#endif
	case KVM_REG_MIPS_CP0_ERROREPC:
		write_gc0_errorepc(v);
		break;
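	/* As on the read path, KScratch1..6 map to CP0 Register 31, Sel 2..7 */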
	case KVM_REG_MIPS_CP0_KSCRATCH1 ... KVM_REG_MIPS_CP0_KSCRATCH6:
		idx = reg->id - KVM_REG_MIPS_CP0_KSCRATCH1 + 2;
		if (!cpu_guest_has_kscr(idx))
			return -EINVAL;
		switch (idx) {
		case 2:
			write_gc0_kscratch1(v);
			break;
		case 3:
			write_gc0_kscratch2(v);
			break;
		case 4:
			write_gc0_kscratch3(v);
			break;
		case 5:
			write_gc0_kscratch4(v);
			break;
		case 6:
			write_gc0_kscratch5(v);
			break;
		case 7:
			write_gc0_kscratch6(v);
			break;
		}
		break;
	case KVM_REG_MIPS_COUNT_CTL:
		ret = kvm_mips_set_count_ctl(vcpu, v);
		break;
	case KVM_REG_MIPS_COUNT_RESUME:
		ret = kvm_mips_set_count_resume(vcpu, v);
		break;
	case KVM_REG_MIPS_COUNT_HZ:
		ret = kvm_mips_set_count_hz(vcpu, v);
		break;
	default:
		return -EINVAL;
	}
	return ret;
}

#define guestid_cache(cpu)	(cpu_data[cpu].guestid_cache)
static void kvm_vz_get_new_guestid(unsigned long cpu, struct kvm_vcpu *vcpu)
{
	unsigned long guestid = guestid_cache(cpu);

	if (!(++guestid & GUESTID_MASK)) {
		if (cpu_has_vtag_icache)
			flush_icache_all();

		if (!guestid)		/* fix version if needed */
			guestid = GUESTID_FIRST_VERSION;

		++guestid;	/* guestid 0 reserved for root */

		/* start new guestid cycle */
		kvm_vz_local_flush_roottlb_all_guests();
		kvm_vz_local_flush_guesttlb_all();
	}

	guestid_cache(cpu) = guestid;
}

/* Returns 1 if the guest TLB may be clobbered */
static int kvm_vz_check_requests(struct kvm_vcpu *vcpu, int cpu)
{
	int ret = 0;
	int i;

	if (!vcpu->requests)
		return 0;

	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
		if (cpu_has_guestid) {
			/* Drop all GuestIDs for this VCPU */
			for_each_possible_cpu(i)
				vcpu->arch.vzguestid[i] = 0;
			/* This will clobber guest TLB contents too */
			ret = 1;
		}
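		/*
		 * Zeroing vzguestid[] above ensures the GuestID version check
		 * in kvm_vz_vcpu_load_tlb() will fail, so a fresh GuestID is
		 * allocated before this VCPU next enters guest context.
		 */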
		/*
		 * For Root ASID Dealias (RAD) we don't do anything here, but we
		 * still need the request to ensure we recheck asid_flush_mask.
		 * We can still return 0 as only the root TLB will be affected
		 * by a root ASID flush.
		 */
	}

	return ret;
}

static void kvm_vz_vcpu_save_wired(struct kvm_vcpu *vcpu)
{
	unsigned int wired = read_gc0_wired();
	struct kvm_mips_tlb *tlbs;
	int i;

	/* Expand the wired TLB array if necessary */
	wired &= MIPSR6_WIRED_WIRED;
	if (wired > vcpu->arch.wired_tlb_limit) {
		tlbs = krealloc(vcpu->arch.wired_tlb, wired *
				sizeof(*vcpu->arch.wired_tlb), GFP_ATOMIC);
		if (WARN_ON(!tlbs)) {
			/* Save whatever we can */
			wired = vcpu->arch.wired_tlb_limit;
		} else {
			vcpu->arch.wired_tlb = tlbs;
			vcpu->arch.wired_tlb_limit = wired;
		}
	}

	if (wired)
		/* Save wired entries from the guest TLB */
		kvm_vz_save_guesttlb(vcpu->arch.wired_tlb, 0, wired);
	/* Invalidate any dropped entries since last time */
	for (i = wired; i < vcpu->arch.wired_tlb_used; ++i) {
		vcpu->arch.wired_tlb[i].tlb_hi = UNIQUE_GUEST_ENTRYHI(i);
		vcpu->arch.wired_tlb[i].tlb_lo[0] = 0;
		vcpu->arch.wired_tlb[i].tlb_lo[1] = 0;
		vcpu->arch.wired_tlb[i].tlb_mask = 0;
	}
	vcpu->arch.wired_tlb_used = wired;
}

static void kvm_vz_vcpu_load_wired(struct kvm_vcpu *vcpu)
{
	/* Load wired entries into the guest TLB */
	if (vcpu->arch.wired_tlb)
		kvm_vz_load_guesttlb(vcpu->arch.wired_tlb, 0,
				     vcpu->arch.wired_tlb_used);
}

static void kvm_vz_vcpu_load_tlb(struct kvm_vcpu *vcpu, int cpu)
{
	struct kvm *kvm = vcpu->kvm;
	struct mm_struct *gpa_mm = &kvm->arch.gpa_mm;
	bool migrated;

	/*
	 * Are we entering guest context on a different CPU to last time?
	 * If so, the VCPU's guest TLB state on this CPU may be stale.
	 */
	migrated = (vcpu->arch.last_exec_cpu != cpu);
	vcpu->arch.last_exec_cpu = cpu;

	/*
	 * A vcpu's GuestID is set in GuestCtl1.ID when the vcpu is loaded and
	 * remains set until another vcpu is loaded in. As a rule GuestRID
	 * remains zeroed when in root context unless the kernel is busy
	 * manipulating guest tlb entries.
	 */
	if (cpu_has_guestid) {
		/*
		 * Check if our GuestID is of an older version and thus invalid.
		 *
		 * We also discard the stored GuestID if we've executed on
		 * another CPU, as the guest mappings may have changed without
		 * hypervisor knowledge.
		 */
		if (migrated ||
		    (vcpu->arch.vzguestid[cpu] ^ guestid_cache(cpu)) &
					GUESTID_VERSION_MASK) {
			kvm_vz_get_new_guestid(cpu, vcpu);
			vcpu->arch.vzguestid[cpu] = guestid_cache(cpu);
			trace_kvm_guestid_change(vcpu,
						 vcpu->arch.vzguestid[cpu]);
		}

		/* Restore GuestID */
		change_c0_guestctl1(GUESTID_MASK, vcpu->arch.vzguestid[cpu]);
	} else {
		/*
		 * The Guest TLB only stores a single guest's TLB state, so
		 * flush it if another VCPU has executed on this CPU.
		 *
		 * We also flush if we've executed on another CPU, as the guest
		 * mappings may have changed without hypervisor knowledge.
		 */
		if (migrated || last_exec_vcpu[cpu] != vcpu)
			kvm_vz_local_flush_guesttlb_all();
		last_exec_vcpu[cpu] = vcpu;
		/*
		 * Root ASID dealiases guest GPA mappings in the root TLB.
		 * Allocate new root ASID if needed.
		 */
		if (cpumask_test_and_clear_cpu(cpu, &kvm->arch.asid_flush_mask)
		    || (cpu_context(cpu, gpa_mm) ^ asid_cache(cpu)) &
						asid_version_mask(cpu))
			get_new_mmu_context(gpa_mm, cpu);
	}
}

static int kvm_vz_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	bool migrated, all;

	/*
	 * Have we migrated to a different CPU?
	 * If so, any old guest TLB state may be stale.
	 */
	migrated = (vcpu->arch.last_sched_cpu != cpu);

	/*
	 * Was this the last VCPU to run on this CPU?
	 * If not, any old guest state from this VCPU will have been clobbered.
	 */
	all = migrated || (last_vcpu[cpu] != vcpu);
	last_vcpu[cpu] = vcpu;

	/*
	 * Restore CP0_Wired unconditionally as we clear it after use, and
	 * restore wired guest TLB entries (while in guest context).
	 */
	kvm_restore_gc0_wired(cop0);
	if (current->flags & PF_VCPU) {
		tlbw_use_hazard();
		kvm_vz_vcpu_load_tlb(vcpu, cpu);
		kvm_vz_vcpu_load_wired(vcpu);
	}

	/*
	 * Restore timer state regardless, as e.g. Cause.TI can change over
	 * time if left unmaintained.
	 */
	kvm_vz_restore_timer(vcpu);

	/* Don't bother restoring registers multiple times unless necessary */
	if (!all)
		return 0;

	/*
	 * Restore config registers first, as some implementations restrict
	 * writes to other registers when the corresponding feature bits aren't
	 * set. For example Status.CU1 cannot be set unless Config1.FP is set.
	 */
	kvm_restore_gc0_config(cop0);
	if (cpu_guest_has_conf1)
		kvm_restore_gc0_config1(cop0);
	if (cpu_guest_has_conf2)
		kvm_restore_gc0_config2(cop0);
	if (cpu_guest_has_conf3)
		kvm_restore_gc0_config3(cop0);
	if (cpu_guest_has_conf4)
		kvm_restore_gc0_config4(cop0);
	if (cpu_guest_has_conf5)
		kvm_restore_gc0_config5(cop0);
	if (cpu_guest_has_conf6)
		kvm_restore_gc0_config6(cop0);
	if (cpu_guest_has_conf7)
		kvm_restore_gc0_config7(cop0);

	kvm_restore_gc0_index(cop0);
	kvm_restore_gc0_entrylo0(cop0);
	kvm_restore_gc0_entrylo1(cop0);
	kvm_restore_gc0_context(cop0);
	if (cpu_guest_has_contextconfig)
		kvm_restore_gc0_contextconfig(cop0);
#ifdef CONFIG_64BIT
	kvm_restore_gc0_xcontext(cop0);
	if (cpu_guest_has_contextconfig)
		kvm_restore_gc0_xcontextconfig(cop0);
#endif
	kvm_restore_gc0_pagemask(cop0);
	kvm_restore_gc0_pagegrain(cop0);
	kvm_restore_gc0_hwrena(cop0);
	kvm_restore_gc0_badvaddr(cop0);
	kvm_restore_gc0_entryhi(cop0);
	kvm_restore_gc0_status(cop0);
	kvm_restore_gc0_intctl(cop0);
	kvm_restore_gc0_epc(cop0);
	kvm_vz_write_gc0_ebase(kvm_read_sw_gc0_ebase(cop0));
	if (cpu_guest_has_userlocal)
		kvm_restore_gc0_userlocal(cop0);

	kvm_restore_gc0_errorepc(cop0);

	/* restore KScratch registers if enabled in guest */
	if (cpu_guest_has_conf4) {
		if (cpu_guest_has_kscr(2))
			kvm_restore_gc0_kscratch1(cop0);
		if (cpu_guest_has_kscr(3))
			kvm_restore_gc0_kscratch2(cop0);
		if (cpu_guest_has_kscr(4))
			kvm_restore_gc0_kscratch3(cop0);
		if (cpu_guest_has_kscr(5))
			kvm_restore_gc0_kscratch4(cop0);
		if (cpu_guest_has_kscr(6))
			kvm_restore_gc0_kscratch5(cop0);
		if (cpu_guest_has_kscr(7))
			kvm_restore_gc0_kscratch6(cop0);
	}

	if (cpu_guest_has_badinstr)
		kvm_restore_gc0_badinstr(cop0);
	if (cpu_guest_has_badinstrp)
		kvm_restore_gc0_badinstrp(cop0);

	if (cpu_guest_has_segments) {
		kvm_restore_gc0_segctl0(cop0);
		kvm_restore_gc0_segctl1(cop0);
		kvm_restore_gc0_segctl2(cop0);
	}

	/* restore HTW registers */
	if (cpu_guest_has_htw) {
		kvm_restore_gc0_pwbase(cop0);
		kvm_restore_gc0_pwfield(cop0);
		kvm_restore_gc0_pwsize(cop0);
		kvm_restore_gc0_pwctl(cop0);
	}

	/* restore Root.GuestCtl2 from unused Guest guestctl2 register */
	if (cpu_has_guestctl2)
		write_c0_guestctl2(
			cop0->reg[MIPS_CP0_GUESTCTL2][MIPS_CP0_GUESTCTL2_SEL]);

	/*
	 * We should clear linked load bit to break interrupted atomics. This
	 * prevents a SC on the next VCPU from succeeding by matching a LL on
	 * the previous VCPU.
	 */
	if (cpu_guest_has_rw_llb)
		write_gc0_lladdr(0);

	return 0;
}

static int kvm_vz_vcpu_put(struct kvm_vcpu *vcpu, int cpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;

	if (current->flags & PF_VCPU)
		kvm_vz_vcpu_save_wired(vcpu);

	kvm_lose_fpu(vcpu);

	kvm_save_gc0_index(cop0);
	kvm_save_gc0_entrylo0(cop0);
	kvm_save_gc0_entrylo1(cop0);
	kvm_save_gc0_context(cop0);
	if (cpu_guest_has_contextconfig)
		kvm_save_gc0_contextconfig(cop0);
#ifdef CONFIG_64BIT
	kvm_save_gc0_xcontext(cop0);
	if (cpu_guest_has_contextconfig)
		kvm_save_gc0_xcontextconfig(cop0);
#endif
	kvm_save_gc0_pagemask(cop0);
	kvm_save_gc0_pagegrain(cop0);
	kvm_save_gc0_wired(cop0);
	/* allow wired TLB entries to be overwritten */
	clear_gc0_wired(MIPSR6_WIRED_WIRED);
	kvm_save_gc0_hwrena(cop0);
	kvm_save_gc0_badvaddr(cop0);
	kvm_save_gc0_entryhi(cop0);
	kvm_save_gc0_status(cop0);
	kvm_save_gc0_intctl(cop0);
	kvm_save_gc0_epc(cop0);
	kvm_write_sw_gc0_ebase(cop0, kvm_vz_read_gc0_ebase());
	if (cpu_guest_has_userlocal)
		kvm_save_gc0_userlocal(cop0);

	/* only save implemented config registers */
	kvm_save_gc0_config(cop0);
	if (cpu_guest_has_conf1)
		kvm_save_gc0_config1(cop0);
	if (cpu_guest_has_conf2)
		kvm_save_gc0_config2(cop0);
	if (cpu_guest_has_conf3)
		kvm_save_gc0_config3(cop0);
	if (cpu_guest_has_conf4)
		kvm_save_gc0_config4(cop0);
	if (cpu_guest_has_conf5)
		kvm_save_gc0_config5(cop0);
	if (cpu_guest_has_conf6)
		kvm_save_gc0_config6(cop0);
	if (cpu_guest_has_conf7)
		kvm_save_gc0_config7(cop0);

	kvm_save_gc0_errorepc(cop0);

	/* save KScratch registers if enabled in guest */
	if (cpu_guest_has_conf4) {
		if (cpu_guest_has_kscr(2))
			kvm_save_gc0_kscratch1(cop0);
		if (cpu_guest_has_kscr(3))
			kvm_save_gc0_kscratch2(cop0);
		if (cpu_guest_has_kscr(4))
			kvm_save_gc0_kscratch3(cop0);
		if (cpu_guest_has_kscr(5))
			kvm_save_gc0_kscratch4(cop0);
		if (cpu_guest_has_kscr(6))
			kvm_save_gc0_kscratch5(cop0);
		if (cpu_guest_has_kscr(7))
			kvm_save_gc0_kscratch6(cop0);
	}

	if (cpu_guest_has_badinstr)
		kvm_save_gc0_badinstr(cop0);
	if (cpu_guest_has_badinstrp)
		kvm_save_gc0_badinstrp(cop0);

	if (cpu_guest_has_segments) {
		kvm_save_gc0_segctl0(cop0);
		kvm_save_gc0_segctl1(cop0);
		kvm_save_gc0_segctl2(cop0);
	}
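	/*
	 * HTW state is only saved when the guest can actually see the page
	 * table walker, i.e. the guest-visible Config3.PW bit is set; hence
	 * the kvm_read_sw_gc0_config3() check below.
	 */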
	/* save HTW registers if enabled in guest */
	if (cpu_guest_has_htw &&
	    kvm_read_sw_gc0_config3(cop0) & MIPS_CONF3_PW) {
		kvm_save_gc0_pwbase(cop0);
		kvm_save_gc0_pwfield(cop0);
		kvm_save_gc0_pwsize(cop0);
		kvm_save_gc0_pwctl(cop0);
	}

	kvm_vz_save_timer(vcpu);

	/* save Root.GuestCtl2 in unused Guest guestctl2 register */
	if (cpu_has_guestctl2)
		cop0->reg[MIPS_CP0_GUESTCTL2][MIPS_CP0_GUESTCTL2_SEL] =
			read_c0_guestctl2();

	return 0;
}

/**
 * kvm_vz_resize_guest_vtlb() - Attempt to resize guest VTLB.
 * @size:	Number of guest VTLB entries (0 < @size <= root VTLB entries).
 *
 * Attempt to resize the guest VTLB by writing guest Config registers. This is
 * necessary for cores with a shared root/guest TLB to avoid overlap with wired
 * entries in the root VTLB.
 *
 * Returns:	The resulting guest VTLB size.
 */
static unsigned int kvm_vz_resize_guest_vtlb(unsigned int size)
{
	unsigned int config4 = 0, ret = 0, limit;

	/* Write MMUSize - 1 into guest Config registers */
	if (cpu_guest_has_conf1)
		change_gc0_config1(MIPS_CONF1_TLBS,
				   (size - 1) << MIPS_CONF1_TLBS_SHIFT);
	if (cpu_guest_has_conf4) {
		config4 = read_gc0_config4();
		if (cpu_has_mips_r6 || (config4 & MIPS_CONF4_MMUEXTDEF) ==
		    MIPS_CONF4_MMUEXTDEF_VTLBSIZEEXT) {
			config4 &= ~MIPS_CONF4_VTLBSIZEEXT;
			config4 |= ((size - 1) >> MIPS_CONF1_TLBS_SIZE) <<
				MIPS_CONF4_VTLBSIZEEXT_SHIFT;
		} else if ((config4 & MIPS_CONF4_MMUEXTDEF) ==
			   MIPS_CONF4_MMUEXTDEF_MMUSIZEEXT) {
			config4 &= ~MIPS_CONF4_MMUSIZEEXT;
			config4 |= ((size - 1) >> MIPS_CONF1_TLBS_SIZE) <<
				MIPS_CONF4_MMUSIZEEXT_SHIFT;
		}
		write_gc0_config4(config4);
	}

	/*
	 * Set Guest.Wired.Limit = 0 (no limit up to Guest.MMUSize-1), unless it
	 * would exceed Root.Wired.Limit (clearing Guest.Wired.Wired so write
	 * not dropped)
	 */
	if (cpu_has_mips_r6) {
		limit = (read_c0_wired() & MIPSR6_WIRED_LIMIT) >>
						MIPSR6_WIRED_LIMIT_SHIFT;
		if (size - 1 <= limit)
			limit = 0;
		write_gc0_wired(limit << MIPSR6_WIRED_LIMIT_SHIFT);
	}

	/* Read back MMUSize - 1 */
	back_to_back_c0_hazard();
	if (cpu_guest_has_conf1)
		ret = (read_gc0_config1() & MIPS_CONF1_TLBS) >>
						MIPS_CONF1_TLBS_SHIFT;
	if (config4) {
		if (cpu_has_mips_r6 || (config4 & MIPS_CONF4_MMUEXTDEF) ==
		    MIPS_CONF4_MMUEXTDEF_VTLBSIZEEXT)
			ret |= ((config4 & MIPS_CONF4_VTLBSIZEEXT) >>
				MIPS_CONF4_VTLBSIZEEXT_SHIFT) <<
				MIPS_CONF1_TLBS_SIZE;
		else if ((config4 & MIPS_CONF4_MMUEXTDEF) ==
			 MIPS_CONF4_MMUEXTDEF_MMUSIZEEXT)
			ret |= ((config4 & MIPS_CONF4_MMUSIZEEXT) >>
				MIPS_CONF4_MMUSIZEEXT_SHIFT) <<
				MIPS_CONF1_TLBS_SIZE;
	}
	return ret + 1;
}

static int kvm_vz_hardware_enable(void)
{
	unsigned int mmu_size, guest_mmu_size, ftlb_size;

	/*
	 * ImgTec cores tend to use a shared root/guest TLB. To avoid overlap
	 * of root wired and guest entries, the guest TLB may need resizing.
	 */
	mmu_size = current_cpu_data.tlbsizevtlb;
	ftlb_size = current_cpu_data.tlbsize - mmu_size;

	/* Try switching to maximum guest VTLB size for flush */
	guest_mmu_size = kvm_vz_resize_guest_vtlb(mmu_size);
	current_cpu_data.guest.tlbsize = guest_mmu_size + ftlb_size;
	kvm_vz_local_flush_guesttlb_all();

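	/*
	 * Worked example with hypothetical numbers: a shared 64-entry root
	 * VTLB with 6 root wired entries leaves 64 - 6 - 2 = 56 entries that
	 * can safely be offered to the guest as its VTLB below.
	 */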
	/*
	 * Reduce to make space for root wired entries and at least 2 root
	 * non-wired entries. This does assume that long-term wired entries
	 * won't be added later.
	 */
	guest_mmu_size = mmu_size - num_wired_entries() - 2;
	guest_mmu_size = kvm_vz_resize_guest_vtlb(guest_mmu_size);
	current_cpu_data.guest.tlbsize = guest_mmu_size + ftlb_size;

	/*
	 * Write the VTLB size, but if another CPU has already written, check
	 * it matches or we won't provide a consistent view to the guest. If
	 * this ever happens it suggests an asymmetric number of wired entries.
	 */
	if (cmpxchg(&kvm_vz_guest_vtlb_size, 0, guest_mmu_size) &&
	    WARN(guest_mmu_size != kvm_vz_guest_vtlb_size,
		 "Available guest VTLB size mismatch"))
		return -EINVAL;

	/*
	 * Enable virtualization features granting guest direct control of
	 * certain features:
	 *	CP0=1:		Guest coprocessor 0 context.
	 *	AT=Guest:	Guest MMU.
	 *	CG=1:		Hit (virtual address) CACHE operations (optional).
	 *	CF=1:		Guest Config registers.
	 *	CGI=1:		Indexed flush CACHE operations (optional).
	 */
	write_c0_guestctl0(MIPS_GCTL0_CP0 |
			   (MIPS_GCTL0_AT_GUEST << MIPS_GCTL0_AT_SHIFT) |
			   MIPS_GCTL0_CG | MIPS_GCTL0_CF);
	if (cpu_has_guestctl0ext)
		set_c0_guestctl0ext(MIPS_GCTL0EXT_CGI);

	if (cpu_has_guestid) {
		write_c0_guestctl1(0);
		kvm_vz_local_flush_roottlb_all_guests();

		GUESTID_MASK = current_cpu_data.guestid_mask;
		GUESTID_FIRST_VERSION = GUESTID_MASK + 1;
		GUESTID_VERSION_MASK = ~GUESTID_MASK;

		current_cpu_data.guestid_cache = GUESTID_FIRST_VERSION;
	}

	/* clear any pending injected virtual guest interrupts */
	if (cpu_has_guestctl2)
		clear_c0_guestctl2(0x3f << 10);

	return 0;
}

static void kvm_vz_hardware_disable(void)
{
	kvm_vz_local_flush_guesttlb_all();

	if (cpu_has_guestid) {
		write_c0_guestctl1(0);
		kvm_vz_local_flush_roottlb_all_guests();
	}
}

static int kvm_vz_check_extension(struct kvm *kvm, long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_MIPS_VZ:
		/* we wouldn't be here unless cpu_has_vz */
		r = 1;
		break;
#ifdef CONFIG_64BIT
	case KVM_CAP_MIPS_64BIT:
		/* We support 64-bit registers/operations and addresses */
		r = 2;
		break;
#endif
	default:
		r = 0;
		break;
	}

	return r;
}

static int kvm_vz_vcpu_init(struct kvm_vcpu *vcpu)
{
	int i;

	for_each_possible_cpu(i)
		vcpu->arch.vzguestid[i] = 0;

	return 0;
}

static void kvm_vz_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	int cpu;

	/*
	 * If the VCPU is freed and reused as another VCPU, we don't want the
	 * matching pointer wrongly hanging around in last_vcpu[] or
	 * last_exec_vcpu[].
	 */
	for_each_possible_cpu(cpu) {
		if (last_vcpu[cpu] == vcpu)
			last_vcpu[cpu] = NULL;
		if (last_exec_vcpu[cpu] == vcpu)
			last_exec_vcpu[cpu] = NULL;
	}
}

static int kvm_vz_vcpu_setup(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	unsigned long count_hz = 100*1000*1000; /* default to 100 MHz */

	/*
	 * Start off the timer at the same frequency as the host timer, but the
	 * soft timer doesn't handle frequencies greater than 1GHz yet.
	 */
	if (mips_hpt_frequency && mips_hpt_frequency <= NSEC_PER_SEC)
		count_hz = mips_hpt_frequency;
	kvm_mips_init_count(vcpu, count_hz);

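	/*
	 * The kvm_write_sw_gc0_*() calls below only update the software shadow
	 * register state; it is written out to the hardware guest context when
	 * the VCPU is next loaded, in kvm_vz_vcpu_load().
	 */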
	/*
	 * Initialize guest register state to valid architectural reset state.
	 */

	/* PageGrain */
	if (cpu_has_mips_r6)
		kvm_write_sw_gc0_pagegrain(cop0, PG_RIE | PG_XIE | PG_IEC);
	/* Wired */
	if (cpu_has_mips_r6)
		kvm_write_sw_gc0_wired(cop0,
				       read_gc0_wired() & MIPSR6_WIRED_LIMIT);
	/* Status */
	kvm_write_sw_gc0_status(cop0, ST0_BEV | ST0_ERL);
	if (cpu_has_mips_r6)
		kvm_change_sw_gc0_status(cop0, ST0_FR, read_gc0_status());
	/* IntCtl */
	kvm_write_sw_gc0_intctl(cop0, read_gc0_intctl() &
				(INTCTLF_IPFDC | INTCTLF_IPPCI | INTCTLF_IPTI));
	/* PRId */
	kvm_write_sw_gc0_prid(cop0, boot_cpu_data.processor_id);
	/* EBase */
	kvm_write_sw_gc0_ebase(cop0, (s32)0x80000000 | vcpu->vcpu_id);
	/* Config */
	kvm_save_gc0_config(cop0);
	/* architecturally writable (e.g. from guest) */
	kvm_change_sw_gc0_config(cop0, CONF_CM_CMASK,
				 _page_cachable_default >> _CACHE_SHIFT);
	/* architecturally read only, but maybe writable from root */
	kvm_change_sw_gc0_config(cop0, MIPS_CONF_MT, read_c0_config());
	if (cpu_guest_has_conf1) {
		kvm_set_sw_gc0_config(cop0, MIPS_CONF_M);
		/* Config1 */
		kvm_save_gc0_config1(cop0);
		/* architecturally read only, but maybe writable from root */
		kvm_clear_sw_gc0_config1(cop0, MIPS_CONF1_C2 |
					       MIPS_CONF1_MD |
					       MIPS_CONF1_PC |
					       MIPS_CONF1_WR |
					       MIPS_CONF1_CA |
					       MIPS_CONF1_FP);
	}
	if (cpu_guest_has_conf2) {
		kvm_set_sw_gc0_config1(cop0, MIPS_CONF_M);
		/* Config2 */
		kvm_save_gc0_config2(cop0);
	}
	if (cpu_guest_has_conf3) {
		kvm_set_sw_gc0_config2(cop0, MIPS_CONF_M);
		/* Config3 */
		kvm_save_gc0_config3(cop0);
		/* architecturally writable (e.g. from guest) */
		kvm_clear_sw_gc0_config3(cop0, MIPS_CONF3_ISA_OE);
		/* architecturally read only, but maybe writable from root */
		kvm_clear_sw_gc0_config3(cop0, MIPS_CONF3_MSA |
					       MIPS_CONF3_BPG |
					       MIPS_CONF3_ULRI |
					       MIPS_CONF3_DSP |
					       MIPS_CONF3_CTXTC |
					       MIPS_CONF3_ITL |
					       MIPS_CONF3_LPA |
					       MIPS_CONF3_VEIC |
					       MIPS_CONF3_VINT |
					       MIPS_CONF3_SP |
					       MIPS_CONF3_CDMM |
					       MIPS_CONF3_MT |
					       MIPS_CONF3_SM |
					       MIPS_CONF3_TL);
	}
	if (cpu_guest_has_conf4) {
		kvm_set_sw_gc0_config3(cop0, MIPS_CONF_M);
		/* Config4 */
		kvm_save_gc0_config4(cop0);
	}
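	/*
	 * Note the pattern above: each ConfigN.M bit is set to advertise the
	 * presence of ConfigN+1 whenever that register exists.
	 */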
	if (cpu_guest_has_conf5) {
		kvm_set_sw_gc0_config4(cop0, MIPS_CONF_M);
		/* Config5 */
		kvm_save_gc0_config5(cop0);
		/* architecturally writable (e.g. from guest) */
		kvm_clear_sw_gc0_config5(cop0, MIPS_CONF5_K |
					       MIPS_CONF5_CV |
					       MIPS_CONF5_MSAEN |
					       MIPS_CONF5_UFE |
					       MIPS_CONF5_FRE |
					       MIPS_CONF5_SBRI |
					       MIPS_CONF5_UFR);
		/* architecturally read only, but maybe writable from root */
		kvm_clear_sw_gc0_config5(cop0, MIPS_CONF5_MRP);
	}

	if (cpu_guest_has_contextconfig) {
		/* ContextConfig */
		kvm_write_sw_gc0_contextconfig(cop0, 0x007ffff0);
#ifdef CONFIG_64BIT
		/* XContextConfig */
		/* bits SEGBITS-13+3:4 set */
		kvm_write_sw_gc0_xcontextconfig(cop0,
					((1ull << (cpu_vmbits - 13)) - 1) << 4);
#endif
	}

	/* Implementation dependent, use the legacy layout */
	if (cpu_guest_has_segments) {
		/* SegCtl0, SegCtl1, SegCtl2 */
		kvm_write_sw_gc0_segctl0(cop0, 0x00200010);
		kvm_write_sw_gc0_segctl1(cop0, 0x00000002 |
				(_page_cachable_default >> _CACHE_SHIFT) <<
						(16 + MIPS_SEGCFG_C_SHIFT));
		kvm_write_sw_gc0_segctl2(cop0, 0x00380438);
	}

	/* reset HTW registers */
	if (cpu_guest_has_htw && cpu_has_mips_r6) {
		/* PWField */
		kvm_write_sw_gc0_pwfield(cop0, 0x0c30c302);
		/* PWSize */
		kvm_write_sw_gc0_pwsize(cop0, 1 << MIPS_PWSIZE_PTW_SHIFT);
	}

	/* start with no pending virtual guest interrupts */
	if (cpu_has_guestctl2)
		cop0->reg[MIPS_CP0_GUESTCTL2][MIPS_CP0_GUESTCTL2_SEL] = 0;

	/* Put PC at reset vector */
	vcpu->arch.pc = CKSEG1ADDR(0x1fc00000);

	return 0;
}

static void kvm_vz_flush_shadow_all(struct kvm *kvm)
{
	if (cpu_has_guestid) {
		/* Flush GuestID for each VCPU individually */
		kvm_flush_remote_tlbs(kvm);
	} else {
		/*
		 * For each CPU there is a single GPA ASID used by all VCPUs in
		 * the VM, so it doesn't make sense for the VCPUs to handle
		 * invalidation of these ASIDs individually.
		 *
		 * Instead mark all CPUs as needing ASID invalidation in
		 * asid_flush_mask, and just use kvm_flush_remote_tlbs(kvm) to
		 * kick any running VCPUs so they check asid_flush_mask.
		 */
		cpumask_setall(&kvm->arch.asid_flush_mask);
		kvm_flush_remote_tlbs(kvm);
	}
}

static void kvm_vz_flush_shadow_memslot(struct kvm *kvm,
					const struct kvm_memory_slot *slot)
{
	kvm_vz_flush_shadow_all(kvm);
}

static void kvm_vz_vcpu_reenter(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	int cpu = smp_processor_id();
	int preserve_guest_tlb;

	preserve_guest_tlb = kvm_vz_check_requests(vcpu, cpu);

	if (preserve_guest_tlb)
		kvm_vz_vcpu_save_wired(vcpu);

	kvm_vz_vcpu_load_tlb(vcpu, cpu);

	if (preserve_guest_tlb)
		kvm_vz_vcpu_load_wired(vcpu);
}

static int kvm_vz_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	int cpu = smp_processor_id();
	int r;

	kvm_vz_acquire_htimer(vcpu);
	/* Check if we have any exceptions/interrupts pending */
	kvm_mips_deliver_interrupts(vcpu, read_gc0_cause());

	kvm_vz_check_requests(vcpu, cpu);
	kvm_vz_vcpu_load_tlb(vcpu, cpu);
	kvm_vz_vcpu_load_wired(vcpu);

	r = vcpu->arch.vcpu_run(run, vcpu);

	kvm_vz_vcpu_save_wired(vcpu);

	return r;
}

static struct kvm_mips_callbacks kvm_vz_callbacks = {
	.handle_cop_unusable = kvm_trap_vz_handle_cop_unusable,
	.handle_tlb_mod = kvm_trap_vz_handle_tlb_st_miss,
	.handle_tlb_ld_miss = kvm_trap_vz_handle_tlb_ld_miss,
	.handle_tlb_st_miss = kvm_trap_vz_handle_tlb_st_miss,
	.handle_addr_err_st = kvm_trap_vz_no_handler,
	.handle_addr_err_ld = kvm_trap_vz_no_handler,
	.handle_syscall = kvm_trap_vz_no_handler,
	.handle_res_inst = kvm_trap_vz_no_handler,
	.handle_break = kvm_trap_vz_no_handler,
	.handle_msa_disabled = kvm_trap_vz_handle_msa_disabled,
	.handle_guest_exit = kvm_trap_vz_handle_guest_exit,

	.hardware_enable = kvm_vz_hardware_enable,
	.hardware_disable = kvm_vz_hardware_disable,
	.check_extension = kvm_vz_check_extension,
	.vcpu_init = kvm_vz_vcpu_init,
	.vcpu_uninit = kvm_vz_vcpu_uninit,
	.vcpu_setup = kvm_vz_vcpu_setup,
	.flush_shadow_all = kvm_vz_flush_shadow_all,
	.flush_shadow_memslot = kvm_vz_flush_shadow_memslot,
	.gva_to_gpa = kvm_vz_gva_to_gpa_cb,
	.queue_timer_int = kvm_vz_queue_timer_int_cb,
	.dequeue_timer_int = kvm_vz_dequeue_timer_int_cb,
	.queue_io_int = kvm_vz_queue_io_int_cb,
	.dequeue_io_int = kvm_vz_dequeue_io_int_cb,
	.irq_deliver = kvm_vz_irq_deliver_cb,
	.irq_clear = kvm_vz_irq_clear_cb,
	.num_regs = kvm_vz_num_regs,
	.copy_reg_indices = kvm_vz_copy_reg_indices,
	.get_one_reg = kvm_vz_get_one_reg,
	.set_one_reg = kvm_vz_set_one_reg,
	.vcpu_load = kvm_vz_vcpu_load,
	.vcpu_put = kvm_vz_vcpu_put,
	.vcpu_run = kvm_vz_vcpu_run,
	.vcpu_reenter = kvm_vz_vcpu_reenter,
};

int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks)
{
	if (!cpu_has_vz)
		return -ENODEV;

	/*
	 * VZ requires at least 2 KScratch registers, so it should have been
	 * possible to allocate pgd_reg.
	 */
	if (WARN(pgd_reg == -1,
		 "pgd_reg not allocated even though cpu_has_vz\n"))
		return -ENODEV;

	pr_info("Starting KVM with MIPS VZ extensions\n");

	*install_callbacks = &kvm_vz_callbacks;
	return 0;
}