1 /* 2 * This file is subject to the terms and conditions of the GNU General Public 3 * License. See the file "COPYING" in the main directory of this archive 4 * for more details. 5 * 6 * KVM/MIPS: Support for hardware virtualization extensions 7 * 8 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. 9 * Authors: Yann Le Du <ledu@kymasys.com> 10 */ 11 12 #include <linux/errno.h> 13 #include <linux/err.h> 14 #include <linux/module.h> 15 #include <linux/preempt.h> 16 #include <linux/vmalloc.h> 17 #include <asm/cacheflush.h> 18 #include <asm/cacheops.h> 19 #include <asm/cmpxchg.h> 20 #include <asm/fpu.h> 21 #include <asm/hazards.h> 22 #include <asm/inst.h> 23 #include <asm/mmu_context.h> 24 #include <asm/r4kcache.h> 25 #include <asm/time.h> 26 #include <asm/tlb.h> 27 #include <asm/tlbex.h> 28 29 #include <linux/kvm_host.h> 30 31 #include "interrupt.h" 32 #include "loongson_regs.h" 33 34 #include "trace.h" 35 36 /* Pointers to last VCPU loaded on each physical CPU */ 37 static struct kvm_vcpu *last_vcpu[NR_CPUS]; 38 /* Pointers to last VCPU executed on each physical CPU */ 39 static struct kvm_vcpu *last_exec_vcpu[NR_CPUS]; 40 41 /* 42 * Number of guest VTLB entries to use, so we can catch inconsistency between 43 * CPUs. 44 */ 45 static unsigned int kvm_vz_guest_vtlb_size; 46 47 static inline long kvm_vz_read_gc0_ebase(void) 48 { 49 if (sizeof(long) == 8 && cpu_has_ebase_wg) 50 return read_gc0_ebase_64(); 51 else 52 return read_gc0_ebase(); 53 } 54 55 static inline void kvm_vz_write_gc0_ebase(long v) 56 { 57 /* 58 * First write with WG=1 to write upper bits, then write again in case 59 * WG should be left at 0. 60 * write_gc0_ebase_64() is no longer UNDEFINED since R6. 61 */ 62 if (sizeof(long) == 8 && 63 (cpu_has_mips64r6 || cpu_has_ebase_wg)) { 64 write_gc0_ebase_64(v | MIPS_EBASE_WG); 65 write_gc0_ebase_64(v); 66 } else { 67 write_gc0_ebase(v | MIPS_EBASE_WG); 68 write_gc0_ebase(v); 69 } 70 } 71 72 /* 73 * These Config bits may be writable by the guest: 74 * Config: [K23, KU] (!TLB), K0 75 * Config1: (none) 76 * Config2: [TU, SU] (impl) 77 * Config3: ISAOnExc 78 * Config4: FTLBPageSize 79 * Config5: K, CV, MSAEn, UFE, FRE, SBRI, UFR 80 */ 81 82 static inline unsigned int kvm_vz_config_guest_wrmask(struct kvm_vcpu *vcpu) 83 { 84 return CONF_CM_CMASK; 85 } 86 87 static inline unsigned int kvm_vz_config1_guest_wrmask(struct kvm_vcpu *vcpu) 88 { 89 return 0; 90 } 91 92 static inline unsigned int kvm_vz_config2_guest_wrmask(struct kvm_vcpu *vcpu) 93 { 94 return 0; 95 } 96 97 static inline unsigned int kvm_vz_config3_guest_wrmask(struct kvm_vcpu *vcpu) 98 { 99 return MIPS_CONF3_ISA_OE; 100 } 101 102 static inline unsigned int kvm_vz_config4_guest_wrmask(struct kvm_vcpu *vcpu) 103 { 104 /* no need to be exact */ 105 return MIPS_CONF4_VFTLBPAGESIZE; 106 } 107 108 static inline unsigned int kvm_vz_config5_guest_wrmask(struct kvm_vcpu *vcpu) 109 { 110 unsigned int mask = MIPS_CONF5_K | MIPS_CONF5_CV | MIPS_CONF5_SBRI; 111 112 /* Permit MSAEn changes if MSA supported and enabled */ 113 if (kvm_mips_guest_has_msa(&vcpu->arch)) 114 mask |= MIPS_CONF5_MSAEN; 115 116 /* 117 * Permit guest FPU mode changes if FPU is enabled and the relevant 118 * feature exists according to FIR register. 
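* (UFR allows user mode to toggle Status.FR and is indicated by FIR.UFRP; FRE/UFE emulate an FR=0 FPU on FR=1 hardware and are indicated by FIR.FREP.) 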
119 */ 120 if (kvm_mips_guest_has_fpu(&vcpu->arch)) { 121 if (cpu_has_ufr) 122 mask |= MIPS_CONF5_UFR; 123 if (cpu_has_fre) 124 mask |= MIPS_CONF5_FRE | MIPS_CONF5_UFE; 125 } 126 127 return mask; 128 } 129 130 /* 131 * VZ optionally allows these additional Config bits to be written by root: 132 * Config: M, [MT] 133 * Config1: M, [MMUSize-1, C2, MD, PC, WR, CA], FP 134 * Config2: M 135 * Config3: M, MSAP, [BPG], ULRI, [DSP2P, DSPP], CTXTC, [ITL, LPA, VEIC, 136 * VInt, SP, CDMM, MT, SM, TL] 137 * Config4: M, [VTLBSizeExt, MMUSizeExt] 138 * Config5: MRP 139 */ 140 141 static inline unsigned int kvm_vz_config_user_wrmask(struct kvm_vcpu *vcpu) 142 { 143 return kvm_vz_config_guest_wrmask(vcpu) | MIPS_CONF_M; 144 } 145 146 static inline unsigned int kvm_vz_config1_user_wrmask(struct kvm_vcpu *vcpu) 147 { 148 unsigned int mask = kvm_vz_config1_guest_wrmask(vcpu) | MIPS_CONF_M; 149 150 /* Permit FPU to be present if FPU is supported */ 151 if (kvm_mips_guest_can_have_fpu(&vcpu->arch)) 152 mask |= MIPS_CONF1_FP; 153 154 return mask; 155 } 156 157 static inline unsigned int kvm_vz_config2_user_wrmask(struct kvm_vcpu *vcpu) 158 { 159 return kvm_vz_config2_guest_wrmask(vcpu) | MIPS_CONF_M; 160 } 161 162 static inline unsigned int kvm_vz_config3_user_wrmask(struct kvm_vcpu *vcpu) 163 { 164 unsigned int mask = kvm_vz_config3_guest_wrmask(vcpu) | MIPS_CONF_M | 165 MIPS_CONF3_ULRI | MIPS_CONF3_CTXTC; 166 167 /* Permit MSA to be present if MSA is supported */ 168 if (kvm_mips_guest_can_have_msa(&vcpu->arch)) 169 mask |= MIPS_CONF3_MSA; 170 171 return mask; 172 } 173 174 static inline unsigned int kvm_vz_config4_user_wrmask(struct kvm_vcpu *vcpu) 175 { 176 return kvm_vz_config4_guest_wrmask(vcpu) | MIPS_CONF_M; 177 } 178 179 static inline unsigned int kvm_vz_config5_user_wrmask(struct kvm_vcpu *vcpu) 180 { 181 return kvm_vz_config5_guest_wrmask(vcpu) | MIPS_CONF5_MRP; 182 } 183 184 static gpa_t kvm_vz_gva_to_gpa_cb(gva_t gva) 185 { 186 /* VZ guest has already converted gva to gpa */ 187 return gva; 188 } 189 190 static void kvm_vz_queue_irq(struct kvm_vcpu *vcpu, unsigned int priority) 191 { 192 set_bit(priority, &vcpu->arch.pending_exceptions); 193 clear_bit(priority, &vcpu->arch.pending_exceptions_clr); 194 } 195 196 static void kvm_vz_dequeue_irq(struct kvm_vcpu *vcpu, unsigned int priority) 197 { 198 clear_bit(priority, &vcpu->arch.pending_exceptions); 199 set_bit(priority, &vcpu->arch.pending_exceptions_clr); 200 } 201 202 static void kvm_vz_queue_timer_int_cb(struct kvm_vcpu *vcpu) 203 { 204 /* 205 * timer expiry is asynchronous to vcpu execution therefore defer guest 206 * cp0 accesses 207 */ 208 kvm_vz_queue_irq(vcpu, MIPS_EXC_INT_TIMER); 209 } 210 211 static void kvm_vz_dequeue_timer_int_cb(struct kvm_vcpu *vcpu) 212 { 213 /* 214 * timer expiry is asynchronous to vcpu execution therefore defer guest 215 * cp0 accesses 216 */ 217 kvm_vz_dequeue_irq(vcpu, MIPS_EXC_INT_TIMER); 218 } 219 220 static void kvm_vz_queue_io_int_cb(struct kvm_vcpu *vcpu, 221 struct kvm_mips_interrupt *irq) 222 { 223 int intr = (int)irq->irq; 224 225 /* 226 * interrupts are asynchronous to vcpu execution therefore defer guest 227 * cp0 accesses 228 */ 229 kvm_vz_queue_irq(vcpu, kvm_irq_to_priority(intr)); 230 } 231 232 static void kvm_vz_dequeue_io_int_cb(struct kvm_vcpu *vcpu, 233 struct kvm_mips_interrupt *irq) 234 { 235 int intr = (int)irq->irq; 236 237 /* 238 * interrupts are asynchronous to vcpu execution therefore defer guest 239 * cp0 accesses 240 */ 241 kvm_vz_dequeue_irq(vcpu, kvm_irq_to_priority(-intr)); 242 } 243 
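/*
 * Illustrative example (not from the original source): userspace raises and
 * lowers a guest I/O interrupt through these callbacks via KVM_INTERRUPT,
 * roughly as follows, with a negative irq number requesting the lowering:
 *
 *	struct kvm_mips_interrupt irq = { .cpu = -1, .irq = 2 };
 *
 *	kvm_vz_queue_io_int_cb(vcpu, &irq);	(raise IRQ 2)
 *	irq.irq = -2;
 *	kvm_vz_dequeue_io_int_cb(vcpu, &irq);	(lower IRQ 2)
 *
 * The pending_exceptions bits set above are only acted on at the next guest
 * entry, by kvm_vz_irq_deliver_cb()/kvm_vz_irq_clear_cb() below.
 */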
244 static int kvm_vz_irq_deliver_cb(struct kvm_vcpu *vcpu, unsigned int priority, 245 u32 cause) 246 { 247 u32 irq = (priority < MIPS_EXC_MAX) ? 248 kvm_priority_to_irq[priority] : 0; 249 250 switch (priority) { 251 case MIPS_EXC_INT_TIMER: 252 set_gc0_cause(C_TI); 253 break; 254 255 case MIPS_EXC_INT_IO_1: 256 case MIPS_EXC_INT_IO_2: 257 case MIPS_EXC_INT_IPI_1: 258 case MIPS_EXC_INT_IPI_2: 259 if (cpu_has_guestctl2) 260 set_c0_guestctl2(irq); 261 else 262 set_gc0_cause(irq); 263 break; 264 265 default: 266 break; 267 } 268 269 clear_bit(priority, &vcpu->arch.pending_exceptions); 270 return 1; 271 } 272 273 static int kvm_vz_irq_clear_cb(struct kvm_vcpu *vcpu, unsigned int priority, 274 u32 cause) 275 { 276 u32 irq = (priority < MIPS_EXC_MAX) ? 277 kvm_priority_to_irq[priority] : 0; 278 279 switch (priority) { 280 case MIPS_EXC_INT_TIMER: 281 /* 282 * Call to kvm_write_c0_guest_compare() clears Cause.TI in 283 * kvm_mips_emulate_CP0(). Explicitly clear irq associated with 284 * Cause.IP[IPTI] if GuestCtl2 virtual interrupt register not 285 * supported or if not using GuestCtl2 Hardware Clear. 286 */ 287 if (cpu_has_guestctl2) { 288 if (!(read_c0_guestctl2() & (irq << 14))) 289 clear_c0_guestctl2(irq); 290 } else { 291 clear_gc0_cause(irq); 292 } 293 break; 294 295 case MIPS_EXC_INT_IO_1: 296 case MIPS_EXC_INT_IO_2: 297 case MIPS_EXC_INT_IPI_1: 298 case MIPS_EXC_INT_IPI_2: 299 /* Clear GuestCtl2.VIP irq if not using Hardware Clear */ 300 if (cpu_has_guestctl2) { 301 if (!(read_c0_guestctl2() & (irq << 14))) 302 clear_c0_guestctl2(irq); 303 } else { 304 clear_gc0_cause(irq); 305 } 306 break; 307 308 default: 309 break; 310 } 311 312 clear_bit(priority, &vcpu->arch.pending_exceptions_clr); 313 return 1; 314 } 315 316 /* 317 * VZ guest timer handling. 318 */ 319 320 /** 321 * kvm_vz_should_use_htimer() - Find whether to use the VZ hard guest timer. 322 * @vcpu: Virtual CPU. 323 * 324 * Returns: true if the VZ GTOffset & real guest CP0_Count should be used 325 * instead of software emulation of guest timer. 326 * false otherwise. 327 */ 328 static bool kvm_vz_should_use_htimer(struct kvm_vcpu *vcpu) 329 { 330 if (kvm_mips_count_disabled(vcpu)) 331 return false; 332 333 /* Chosen frequency must match real frequency */ 334 if (mips_hpt_frequency != vcpu->arch.count_hz) 335 return false; 336 337 /* We don't support a CP0_GTOffset with fewer bits than CP0_Count */ 338 if (current_cpu_data.gtoffset_mask != 0xffffffff) 339 return false; 340 341 return true; 342 } 343 344 /** 345 * _kvm_vz_restore_stimer() - Restore soft timer state. 346 * @vcpu: Virtual CPU. 347 * @compare: CP0_Compare register value, restored by caller. 348 * @cause: CP0_Cause register to restore. 349 * 350 * Restore VZ state relating to the soft timer. The hard timer can be enabled 351 * later. 352 */ 353 static void _kvm_vz_restore_stimer(struct kvm_vcpu *vcpu, u32 compare, 354 u32 cause) 355 { 356 /* 357 * Avoid spurious counter interrupts by setting Guest CP0_Count to just 358 * after Guest CP0_Compare. 359 */ 360 write_c0_gtoffset(compare - read_c0_count()); 361 362 back_to_back_c0_hazard(); 363 write_gc0_cause(cause); 364 } 365 366 /** 367 * _kvm_vz_restore_htimer() - Restore hard timer state. 368 * @vcpu: Virtual CPU. 369 * @compare: CP0_Compare register value, restored by caller. 370 * @cause: CP0_Cause register to restore. 371 * 372 * Restore hard timer Guest.Count & Guest.Cause taking care to preserve the 373 * value of Guest.CP0_Cause.TI while restoring Guest.CP0_Cause. 
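* Since the restore is not atomic against a running CP0_Count, the tail of this function re-checks, in modulo-2^32 arithmetic, whether Guest.CP0_Compare fell within (start_count, after_count] and queues the timer interrupt by hand if so. 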
374 */ 375 static void _kvm_vz_restore_htimer(struct kvm_vcpu *vcpu, 376 u32 compare, u32 cause) 377 { 378 u32 start_count, after_count; 379 ktime_t freeze_time; 380 unsigned long flags; 381 382 /* 383 * Freeze the soft-timer and sync the guest CP0_Count with it. We do 384 * this with interrupts disabled to avoid latency. 385 */ 386 local_irq_save(flags); 387 freeze_time = kvm_mips_freeze_hrtimer(vcpu, &start_count); 388 write_c0_gtoffset(start_count - read_c0_count()); 389 local_irq_restore(flags); 390 391 /* restore guest CP0_Cause, as TI may already be set */ 392 back_to_back_c0_hazard(); 393 write_gc0_cause(cause); 394 395 /* 396 * The above sequence isn't atomic and would result in lost timer 397 * interrupts if we're not careful. Detect if a timer interrupt is due 398 * and assert it. 399 */ 400 back_to_back_c0_hazard(); 401 after_count = read_gc0_count(); 402 if (after_count - start_count > compare - start_count - 1) 403 kvm_vz_queue_irq(vcpu, MIPS_EXC_INT_TIMER); 404 } 405 406 /** 407 * kvm_vz_restore_timer() - Restore timer state. 408 * @vcpu: Virtual CPU. 409 * 410 * Restore soft timer state from saved context. 411 */ 412 static void kvm_vz_restore_timer(struct kvm_vcpu *vcpu) 413 { 414 struct mips_coproc *cop0 = vcpu->arch.cop0; 415 u32 cause, compare; 416 417 compare = kvm_read_sw_gc0_compare(cop0); 418 cause = kvm_read_sw_gc0_cause(cop0); 419 420 write_gc0_compare(compare); 421 _kvm_vz_restore_stimer(vcpu, compare, cause); 422 } 423 424 /** 425 * kvm_vz_acquire_htimer() - Switch to hard timer state. 426 * @vcpu: Virtual CPU. 427 * 428 * Restore hard timer state on top of existing soft timer state if possible. 429 * 430 * Since hard timer won't remain active over preemption, preemption should be 431 * disabled by the caller. 432 */ 433 void kvm_vz_acquire_htimer(struct kvm_vcpu *vcpu) 434 { 435 u32 gctl0; 436 437 gctl0 = read_c0_guestctl0(); 438 if (!(gctl0 & MIPS_GCTL0_GT) && kvm_vz_should_use_htimer(vcpu)) { 439 /* enable guest access to hard timer */ 440 write_c0_guestctl0(gctl0 | MIPS_GCTL0_GT); 441 442 _kvm_vz_restore_htimer(vcpu, read_gc0_compare(), 443 read_gc0_cause()); 444 } 445 } 446 447 /** 448 * _kvm_vz_save_htimer() - Switch to software emulation of guest timer. 449 * @vcpu: Virtual CPU. 450 * @compare: Pointer to write compare value to. 451 * @cause: Pointer to write cause value to. 452 * 453 * Save VZ guest timer state and switch to software emulation of guest CP0 454 * timer. The hard timer must already be in use, so preemption should be 455 * disabled. 456 */ 457 static void _kvm_vz_save_htimer(struct kvm_vcpu *vcpu, 458 u32 *out_compare, u32 *out_cause) 459 { 460 u32 cause, compare, before_count, end_count; 461 ktime_t before_time; 462 463 compare = read_gc0_compare(); 464 *out_compare = compare; 465 466 before_time = ktime_get(); 467 468 /* 469 * Record the CP0_Count *prior* to saving CP0_Cause, so we have a time 470 * at which no pending timer interrupt is missing. 471 */ 472 before_count = read_gc0_count(); 473 back_to_back_c0_hazard(); 474 cause = read_gc0_cause(); 475 *out_cause = cause; 476 477 /* 478 * Record a final CP0_Count which we will transfer to the soft-timer. 479 * This is recorded *after* saving CP0_Cause, so we don't get any timer 480 * interrupts from just after the final CP0_Count point. 481 */ 482 back_to_back_c0_hazard(); 483 end_count = read_gc0_count(); 484 485 /* 486 * The above sequence isn't atomic, so we could miss a timer interrupt 487 * between reading CP0_Cause and end_count. 
Detect and record any timer 488 * interrupt due between before_count and end_count. 489 */ 490 if (end_count - before_count > compare - before_count - 1) 491 kvm_vz_queue_irq(vcpu, MIPS_EXC_INT_TIMER); 492 493 /* 494 * Restore soft-timer, ignoring a small amount of negative drift due to 495 * delay between freeze_hrtimer and setting CP0_GTOffset. 496 */ 497 kvm_mips_restore_hrtimer(vcpu, before_time, end_count, -0x10000); 498 } 499 500 /** 501 * kvm_vz_save_timer() - Save guest timer state. 502 * @vcpu: Virtual CPU. 503 * 504 * Save VZ guest timer state and switch to soft guest timer if hard timer was in 505 * use. 506 */ 507 static void kvm_vz_save_timer(struct kvm_vcpu *vcpu) 508 { 509 struct mips_coproc *cop0 = vcpu->arch.cop0; 510 u32 gctl0, compare, cause; 511 512 gctl0 = read_c0_guestctl0(); 513 if (gctl0 & MIPS_GCTL0_GT) { 514 /* disable guest use of hard timer */ 515 write_c0_guestctl0(gctl0 & ~MIPS_GCTL0_GT); 516 517 /* save hard timer state */ 518 _kvm_vz_save_htimer(vcpu, &compare, &cause); 519 } else { 520 compare = read_gc0_compare(); 521 cause = read_gc0_cause(); 522 } 523 524 /* save timer-related state to VCPU context */ 525 kvm_write_sw_gc0_cause(cop0, cause); 526 kvm_write_sw_gc0_compare(cop0, compare); 527 } 528 529 /** 530 * kvm_vz_lose_htimer() - Ensure hard guest timer is not in use. 531 * @vcpu: Virtual CPU. 532 * 533 * Transfers the state of the hard guest timer to the soft guest timer, leaving 534 * guest state intact so it can continue to be used with the soft timer. 535 */ 536 void kvm_vz_lose_htimer(struct kvm_vcpu *vcpu) 537 { 538 u32 gctl0, compare, cause; 539 540 preempt_disable(); 541 gctl0 = read_c0_guestctl0(); 542 if (gctl0 & MIPS_GCTL0_GT) { 543 /* disable guest use of timer */ 544 write_c0_guestctl0(gctl0 & ~MIPS_GCTL0_GT); 545 546 /* switch to soft timer */ 547 _kvm_vz_save_htimer(vcpu, &compare, &cause); 548 549 /* leave soft timer in usable state */ 550 _kvm_vz_restore_stimer(vcpu, compare, cause); 551 } 552 preempt_enable(); 553 } 554 555 /** 556 * is_eva_access() - Find whether an instruction is an EVA memory accessor. 557 * @inst: 32-bit instruction encoding. 558 * 559 * Finds whether @inst encodes an EVA memory access instruction, which would 560 * indicate that emulation of it should access the user mode address space 561 * instead of the kernel mode address space. This matters for MUSUK segments 562 * which are TLB mapped for user mode but unmapped for kernel mode. 563 * 564 * Returns: Whether @inst encodes an EVA accessor instruction. 565 */ 566 static bool is_eva_access(union mips_instruction inst) 567 { 568 if (inst.spec3_format.opcode != spec3_op) 569 return false; 570 571 switch (inst.spec3_format.func) { 572 case lwle_op: 573 case lwre_op: 574 case cachee_op: 575 case sbe_op: 576 case she_op: 577 case sce_op: 578 case swe_op: 579 case swle_op: 580 case swre_op: 581 case prefe_op: 582 case lbue_op: 583 case lhue_op: 584 case lbe_op: 585 case lhe_op: 586 case lle_op: 587 case lwe_op: 588 return true; 589 default: 590 return false; 591 } 592 } 593 594 /** 595 * is_eva_am_mapped() - Find whether an access mode is mapped. 596 * @vcpu: KVM VCPU state. 597 * @am: 3-bit encoded access mode. 598 * @eu: Segment becomes unmapped and uncached when Status.ERL=1. 599 * 600 * Decode @am to find whether it encodes a mapped segment for the current VCPU 601 * state. Where necessary @eu and the actual instruction causing the fault are 602 * taken into account to make the decision. 603 * 604 * Returns: Whether the VCPU faulted on a TLB mapped address. 
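* (The magic constant 0x70080000 in the body encodes the AM table there: bits 30..28 reach the sign bit after the << am for MK/MSK/MUSK, and bit 19 does the same after the further << 8 for MUSUK.) 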
605 */ 606 static bool is_eva_am_mapped(struct kvm_vcpu *vcpu, unsigned int am, bool eu) 607 { 608 u32 am_lookup; 609 int err; 610 611 /* 612 * Interpret access control mode. We assume address errors will already 613 * have been caught by the guest, leaving us with: 614 * AM UM SM KM 31..24 23..16 615 * UK 0 000 Unm 0 0 616 * MK 1 001 TLB 1 617 * MSK 2 010 TLB TLB 1 618 * MUSK 3 011 TLB TLB TLB 1 619 * MUSUK 4 100 TLB TLB Unm 0 1 620 * USK 5 101 Unm Unm 0 0 621 * - 6 110 0 0 622 * UUSK 7 111 Unm Unm Unm 0 0 623 * 624 * We shift a magic value by AM across the sign bit to find if always 625 * TLB mapped, and if not shift by 8 again to find if it depends on KM. 626 */ 627 am_lookup = 0x70080000 << am; 628 if ((s32)am_lookup < 0) { 629 /* 630 * MK, MSK, MUSK 631 * Always TLB mapped, unless SegCtl.EU && ERL 632 */ 633 if (!eu || !(read_gc0_status() & ST0_ERL)) 634 return true; 635 } else { 636 am_lookup <<= 8; 637 if ((s32)am_lookup < 0) { 638 union mips_instruction inst; 639 unsigned int status; 640 u32 *opc; 641 642 /* 643 * MUSUK 644 * TLB mapped if not in kernel mode 645 */ 646 status = read_gc0_status(); 647 if (!(status & (ST0_EXL | ST0_ERL)) && 648 (status & ST0_KSU)) 649 return true; 650 /* 651 * EVA access instructions in kernel 652 * mode access user address space. 653 */ 654 opc = (u32 *)vcpu->arch.pc; 655 if (vcpu->arch.host_cp0_cause & CAUSEF_BD) 656 opc += 1; 657 err = kvm_get_badinstr(opc, vcpu, &inst.word); 658 if (!err && is_eva_access(inst)) 659 return true; 660 } 661 } 662 663 return false; 664 } 665 666 /** 667 * kvm_vz_gva_to_gpa() - Convert valid GVA to GPA. 668 * @vcpu: KVM VCPU state. 669 * @gva: Guest virtual address to convert. 670 * @gpa: Output guest physical address. 671 * 672 * Convert a guest virtual address (GVA) which is valid according to the guest 673 * context, to a guest physical address (GPA). 674 * 675 * Returns: 0 on success. 676 * -errno on failure. 677 */ 678 static int kvm_vz_gva_to_gpa(struct kvm_vcpu *vcpu, unsigned long gva, 679 unsigned long *gpa) 680 { 681 u32 gva32 = gva; 682 unsigned long segctl; 683 684 if ((long)gva == (s32)gva32) { 685 /* Handle canonical 32-bit virtual address */ 686 if (cpu_guest_has_segments) { 687 unsigned long mask, pa; 688 689 switch (gva32 >> 29) { 690 case 0: 691 case 1: /* CFG5 (1GB) */ 692 segctl = read_gc0_segctl2() >> 16; 693 mask = (unsigned long)0xfc0000000ull; 694 break; 695 case 2: 696 case 3: /* CFG4 (1GB) */ 697 segctl = read_gc0_segctl2(); 698 mask = (unsigned long)0xfc0000000ull; 699 break; 700 case 4: /* CFG3 (512MB) */ 701 segctl = read_gc0_segctl1() >> 16; 702 mask = (unsigned long)0xfe0000000ull; 703 break; 704 case 5: /* CFG2 (512MB) */ 705 segctl = read_gc0_segctl1(); 706 mask = (unsigned long)0xfe0000000ull; 707 break; 708 case 6: /* CFG1 (512MB) */ 709 segctl = read_gc0_segctl0() >> 16; 710 mask = (unsigned long)0xfe0000000ull; 711 break; 712 case 7: /* CFG0 (512MB) */ 713 segctl = read_gc0_segctl0(); 714 mask = (unsigned long)0xfe0000000ull; 715 break; 716 default: 717 /* 718 * GCC 4.9 isn't smart enough to figure out that 719 * segctl and mask are always initialised. 
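* (Each case above selects one 16-bit segment configuration, laid out with PA in bits 15:9, AM in bits 6:4 and EU in bit 3, which the shifts and masks below rely on.) 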
720 */ 721 unreachable(); 722 } 723 724 if (is_eva_am_mapped(vcpu, (segctl >> 4) & 0x7, 725 segctl & 0x0008)) 726 goto tlb_mapped; 727 728 /* Unmapped, find guest physical address */ 729 pa = (segctl << 20) & mask; 730 pa |= gva32 & ~mask; 731 *gpa = pa; 732 return 0; 733 } else if ((s32)gva32 < (s32)0xc0000000) { 734 /* legacy unmapped KSeg0 or KSeg1 */ 735 *gpa = gva32 & 0x1fffffff; 736 return 0; 737 } 738 #ifdef CONFIG_64BIT 739 } else if ((gva & 0xc000000000000000) == 0x8000000000000000) { 740 /* XKPHYS */ 741 if (cpu_guest_has_segments) { 742 /* 743 * Each of the 8 regions can be overridden by SegCtl2.XR 744 * to use SegCtl1.XAM. 745 */ 746 segctl = read_gc0_segctl2(); 747 if (segctl & (1ull << (56 + ((gva >> 59) & 0x7)))) { 748 segctl = read_gc0_segctl1(); 749 if (is_eva_am_mapped(vcpu, (segctl >> 59) & 0x7, 750 0)) 751 goto tlb_mapped; 752 } 753 754 } 755 /* 756 * Traditionally fully unmapped. 757 * Bits 61:59 specify the CCA, which we can just mask off here. 758 * Bits 58:PABITS should be zero, but we shouldn't have got here 759 * if it wasn't. 760 */ 761 *gpa = gva & 0x07ffffffffffffff; 762 return 0; 763 #endif 764 } 765 766 tlb_mapped: 767 return kvm_vz_guest_tlb_lookup(vcpu, gva, gpa); 768 } 769 770 /** 771 * kvm_vz_badvaddr_to_gpa() - Convert GVA BadVAddr from root exception to GPA. 772 * @vcpu: KVM VCPU state. 773 * @badvaddr: Root BadVAddr. 774 * @gpa: Output guest physical address. 775 * 776 * VZ implementations are permitted to report guest virtual addresses (GVA) in 777 * BadVAddr on a root exception during guest execution, instead of the more 778 * convenient guest physical addresses (GPA). When we get a GVA, this function 779 * converts it to a GPA, taking into account guest segmentation and guest TLB 780 * state. 781 * 782 * Returns: 0 on success. 783 * -errno on failure. 784 */ 785 static int kvm_vz_badvaddr_to_gpa(struct kvm_vcpu *vcpu, unsigned long badvaddr, 786 unsigned long *gpa) 787 { 788 unsigned int gexccode = (vcpu->arch.host_cp0_guestctl0 & 789 MIPS_GCTL0_GEXC) >> MIPS_GCTL0_GEXC_SHIFT; 790 791 /* If BadVAddr is GPA, then all is well in the world */ 792 if (likely(gexccode == MIPS_GCTL0_GEXC_GPA)) { 793 *gpa = badvaddr; 794 return 0; 795 } 796 797 /* Otherwise we'd expect it to be GVA ... */ 798 if (WARN(gexccode != MIPS_GCTL0_GEXC_GVA, 799 "Unexpected gexccode %#x\n", gexccode)) 800 return -EINVAL; 801 802 /* ... and we need to perform the GVA->GPA translation in software */ 803 return kvm_vz_gva_to_gpa(vcpu, badvaddr, gpa); 804 } 805 806 static int kvm_trap_vz_no_handler(struct kvm_vcpu *vcpu) 807 { 808 u32 *opc = (u32 *) vcpu->arch.pc; 809 u32 cause = vcpu->arch.host_cp0_cause; 810 u32 exccode = (cause & CAUSEF_EXCCODE) >> CAUSEB_EXCCODE; 811 unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr; 812 u32 inst = 0; 813 814 /* 815 * Fetch the instruction. 
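* If the exception was taken in a branch delay slot (Cause.BD), EPC points at the branch, so the faulting instruction is one word later; hence the opc adjustment below. 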
816 */ 817 if (cause & CAUSEF_BD) 818 opc += 1; 819 kvm_get_badinstr(opc, vcpu, &inst); 820 821 kvm_err("Exception Code: %d not handled @ PC: %p, inst: 0x%08x BadVaddr: %#lx Status: %#x\n", 822 exccode, opc, inst, badvaddr, 823 read_gc0_status()); 824 kvm_arch_vcpu_dump_regs(vcpu); 825 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; 826 return RESUME_HOST; 827 } 828 829 static unsigned long mips_process_maar(unsigned int op, unsigned long val) 830 { 831 /* Mask off unused bits */ 832 unsigned long mask = 0xfffff000 | MIPS_MAAR_S | MIPS_MAAR_VL; 833 834 if (read_gc0_pagegrain() & PG_ELPA) 835 mask |= 0x00ffffff00000000ull; 836 if (cpu_guest_has_mvh) 837 mask |= MIPS_MAAR_VH; 838 839 /* Set or clear VH */ 840 if (op == mtc_op) { 841 /* clear VH */ 842 val &= ~MIPS_MAAR_VH; 843 } else if (op == dmtc_op) { 844 /* set VH to match VL */ 845 val &= ~MIPS_MAAR_VH; 846 if (val & MIPS_MAAR_VL) 847 val |= MIPS_MAAR_VH; 848 } 849 850 return val & mask; 851 } 852 853 static void kvm_write_maari(struct kvm_vcpu *vcpu, unsigned long val) 854 { 855 struct mips_coproc *cop0 = vcpu->arch.cop0; 856 857 val &= MIPS_MAARI_INDEX; 858 if (val == MIPS_MAARI_INDEX) 859 kvm_write_sw_gc0_maari(cop0, ARRAY_SIZE(vcpu->arch.maar) - 1); 860 else if (val < ARRAY_SIZE(vcpu->arch.maar)) 861 kvm_write_sw_gc0_maari(cop0, val); 862 } 863 864 static enum emulation_result kvm_vz_gpsi_cop0(union mips_instruction inst, 865 u32 *opc, u32 cause, 866 struct kvm_run *run, 867 struct kvm_vcpu *vcpu) 868 { 869 struct mips_coproc *cop0 = vcpu->arch.cop0; 870 enum emulation_result er = EMULATE_DONE; 871 u32 rt, rd, sel; 872 unsigned long curr_pc; 873 unsigned long val; 874 875 /* 876 * Update PC and hold onto current PC in case there is 877 * an error and we want to rollback the PC 878 */ 879 curr_pc = vcpu->arch.pc; 880 er = update_pc(vcpu, cause); 881 if (er == EMULATE_FAIL) 882 return er; 883 884 if (inst.co_format.co) { 885 switch (inst.co_format.func) { 886 case wait_op: 887 er = kvm_mips_emul_wait(vcpu); 888 break; 889 default: 890 er = EMULATE_FAIL; 891 } 892 } else { 893 rt = inst.c0r_format.rt; 894 rd = inst.c0r_format.rd; 895 sel = inst.c0r_format.sel; 896 897 switch (inst.c0r_format.rs) { 898 case dmfc_op: 899 case mfc_op: 900 #ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS 901 cop0->stat[rd][sel]++; 902 #endif 903 if (rd == MIPS_CP0_COUNT && 904 sel == 0) { /* Count */ 905 val = kvm_mips_read_count(vcpu); 906 } else if (rd == MIPS_CP0_COMPARE && 907 sel == 0) { /* Compare */ 908 val = read_gc0_compare(); 909 } else if (rd == MIPS_CP0_LLADDR && 910 sel == 0) { /* LLAddr */ 911 if (cpu_guest_has_rw_llb) 912 val = read_gc0_lladdr() & 913 MIPS_LLADDR_LLB; 914 else 915 val = 0; 916 } else if (rd == MIPS_CP0_LLADDR && 917 sel == 1 && /* MAAR */ 918 cpu_guest_has_maar && 919 !cpu_guest_has_dyn_maar) { 920 /* MAARI must be in range */ 921 BUG_ON(kvm_read_sw_gc0_maari(cop0) >= 922 ARRAY_SIZE(vcpu->arch.maar)); 923 val = vcpu->arch.maar[ 924 kvm_read_sw_gc0_maari(cop0)]; 925 } else if ((rd == MIPS_CP0_PRID && 926 (sel == 0 || /* PRid */ 927 sel == 2 || /* CDMMBase */ 928 sel == 3)) || /* CMGCRBase */ 929 (rd == MIPS_CP0_STATUS && 930 (sel == 2 || /* SRSCtl */ 931 sel == 3)) || /* SRSMap */ 932 (rd == MIPS_CP0_CONFIG && 933 (sel == 7)) || /* Config7 */ 934 (rd == MIPS_CP0_LLADDR && 935 (sel == 2) && /* MAARI */ 936 cpu_guest_has_maar && 937 !cpu_guest_has_dyn_maar) || 938 (rd == MIPS_CP0_ERRCTL && 939 (sel == 0))) { /* ErrCtl */ 940 val = cop0->reg[rd][sel]; 941 } else { 942 val = 0; 943 er = EMULATE_FAIL; 944 } 945 946 if (er != 
EMULATE_FAIL) { 947 /* Sign extend */ 948 if (inst.c0r_format.rs == mfc_op) 949 val = (int)val; 950 vcpu->arch.gprs[rt] = val; 951 } 952 953 trace_kvm_hwr(vcpu, (inst.c0r_format.rs == mfc_op) ? 954 KVM_TRACE_MFC0 : KVM_TRACE_DMFC0, 955 KVM_TRACE_COP0(rd, sel), val); 956 break; 957 958 case dmtc_op: 959 case mtc_op: 960 #ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS 961 cop0->stat[rd][sel]++; 962 #endif 963 val = vcpu->arch.gprs[rt]; 964 trace_kvm_hwr(vcpu, (inst.c0r_format.rs == mtc_op) ? 965 KVM_TRACE_MTC0 : KVM_TRACE_DMTC0, 966 KVM_TRACE_COP0(rd, sel), val); 967 968 if (rd == MIPS_CP0_COUNT && 969 sel == 0) { /* Count */ 970 kvm_vz_lose_htimer(vcpu); 971 kvm_mips_write_count(vcpu, vcpu->arch.gprs[rt]); 972 } else if (rd == MIPS_CP0_COMPARE && 973 sel == 0) { /* Compare */ 974 kvm_mips_write_compare(vcpu, 975 vcpu->arch.gprs[rt], 976 true); 977 } else if (rd == MIPS_CP0_LLADDR && 978 sel == 0) { /* LLAddr */ 979 /* 980 * P5600 generates GPSI on guest MTC0 LLAddr. 981 * Only allow the guest to clear LLB. 982 */ 983 if (cpu_guest_has_rw_llb && 984 !(val & MIPS_LLADDR_LLB)) 985 write_gc0_lladdr(0); 986 } else if (rd == MIPS_CP0_LLADDR && 987 sel == 1 && /* MAAR */ 988 cpu_guest_has_maar && 989 !cpu_guest_has_dyn_maar) { 990 val = mips_process_maar(inst.c0r_format.rs, 991 val); 992 993 /* MAARI must be in range */ 994 BUG_ON(kvm_read_sw_gc0_maari(cop0) >= 995 ARRAY_SIZE(vcpu->arch.maar)); 996 vcpu->arch.maar[kvm_read_sw_gc0_maari(cop0)] = 997 val; 998 } else if (rd == MIPS_CP0_LLADDR && 999 (sel == 2) && /* MAARI */ 1000 cpu_guest_has_maar && 1001 !cpu_guest_has_dyn_maar) { 1002 kvm_write_maari(vcpu, val); 1003 } else if (rd == MIPS_CP0_ERRCTL && 1004 (sel == 0)) { /* ErrCtl */ 1005 /* ignore the written value */ 1006 } else { 1007 er = EMULATE_FAIL; 1008 } 1009 break; 1010 1011 default: 1012 er = EMULATE_FAIL; 1013 break; 1014 } 1015 } 1016 /* Rollback PC only if emulation was unsuccessful */ 1017 if (er == EMULATE_FAIL) { 1018 kvm_err("[%#lx]%s: unsupported cop0 instruction 0x%08x\n", 1019 curr_pc, __func__, inst.word); 1020 1021 vcpu->arch.pc = curr_pc; 1022 } 1023 1024 return er; 1025 } 1026 1027 static enum emulation_result kvm_vz_gpsi_cache(union mips_instruction inst, 1028 u32 *opc, u32 cause, 1029 struct kvm_run *run, 1030 struct kvm_vcpu *vcpu) 1031 { 1032 enum emulation_result er = EMULATE_DONE; 1033 u32 cache, op_inst, op, base; 1034 s16 offset; 1035 struct kvm_vcpu_arch *arch = &vcpu->arch; 1036 unsigned long va, curr_pc; 1037 1038 /* 1039 * Update PC and hold onto current PC in case there is 1040 * an error and we want to rollback the PC 1041 */ 1042 curr_pc = vcpu->arch.pc; 1043 er = update_pc(vcpu, cause); 1044 if (er == EMULATE_FAIL) 1045 return er; 1046 1047 base = inst.i_format.rs; 1048 op_inst = inst.i_format.rt; 1049 if (cpu_has_mips_r6) 1050 offset = inst.spec3_format.simmediate; 1051 else 1052 offset = inst.i_format.simmediate; 1053 cache = op_inst & CacheOp_Cache; 1054 op = op_inst & CacheOp_Op; 1055 1056 va = arch->gprs[base] + offset; 1057 1058 kvm_debug("CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n", 1059 cache, op, base, arch->gprs[base], offset); 1060 1061 /* Secondary or tertiary cache ops ignored */ 1062 if (cache != Cache_I && cache != Cache_D) 1063 return EMULATE_DONE; 1064 1065 switch (op_inst) { 1066 case Index_Invalidate_I: 1067 flush_icache_line_indexed(va); 1068 return EMULATE_DONE; 1069 case Index_Writeback_Inv_D: 1070 flush_dcache_line_indexed(va); 1071 return EMULATE_DONE; 1072 case Hit_Invalidate_I: 1073 case Hit_Invalidate_D: 1074 case 
Hit_Writeback_Inv_D: 1075 if (boot_cpu_type() == CPU_CAVIUM_OCTEON3) { 1076 /* We can just flush entire icache */ 1077 local_flush_icache_range(0, 0); 1078 return EMULATE_DONE; 1079 } 1080 1081 /* So far, other platforms support guest hit cache ops */ 1082 break; 1083 default: 1084 break; 1085 } 1086 1087 kvm_err("@ %#lx/%#lx CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n", 1088 curr_pc, vcpu->arch.gprs[31], cache, op, base, arch->gprs[base], 1089 offset); 1090 /* Rollback PC */ 1091 vcpu->arch.pc = curr_pc; 1092 1093 return EMULATE_FAIL; 1094 } 1095 1096 #ifdef CONFIG_CPU_LOONGSON64 1097 static enum emulation_result kvm_vz_gpsi_lwc2(union mips_instruction inst, 1098 u32 *opc, u32 cause, 1099 struct kvm_run *run, 1100 struct kvm_vcpu *vcpu) 1101 { 1102 unsigned int rs, rd; 1103 unsigned int hostcfg; 1104 unsigned long curr_pc; 1105 enum emulation_result er = EMULATE_DONE; 1106 1107 /* 1108 * Update PC and hold onto current PC in case there is 1109 * an error and we want to rollback the PC 1110 */ 1111 curr_pc = vcpu->arch.pc; 1112 er = update_pc(vcpu, cause); 1113 if (er == EMULATE_FAIL) 1114 return er; 1115 1116 rs = inst.loongson3_lscsr_format.rs; 1117 rd = inst.loongson3_lscsr_format.rd; 1118 switch (inst.loongson3_lscsr_format.fr) { 1119 case 0x8: /* Read CPUCFG */ 1120 ++vcpu->stat.vz_cpucfg_exits; 1121 hostcfg = read_cpucfg(vcpu->arch.gprs[rs]); 1122 1123 switch (vcpu->arch.gprs[rs]) { 1124 case LOONGSON_CFG0: 1125 vcpu->arch.gprs[rd] = 0x14c000; 1126 break; 1127 case LOONGSON_CFG1: 1128 hostcfg &= (LOONGSON_CFG1_FP | LOONGSON_CFG1_MMI | 1129 LOONGSON_CFG1_MSA1 | LOONGSON_CFG1_MSA2 | 1130 LOONGSON_CFG1_SFBP); 1131 vcpu->arch.gprs[rd] = hostcfg; 1132 break; 1133 case LOONGSON_CFG2: 1134 hostcfg &= (LOONGSON_CFG2_LEXT1 | LOONGSON_CFG2_LEXT2 | 1135 LOONGSON_CFG2_LEXT3 | LOONGSON_CFG2_LSPW); 1136 vcpu->arch.gprs[rd] = hostcfg; 1137 break; 1138 case LOONGSON_CFG3: 1139 vcpu->arch.gprs[rd] = hostcfg; 1140 break; 1141 default: 1142 /* Don't export any other advanced features to guest */ 1143 vcpu->arch.gprs[rd] = 0; 1144 break; 1145 } 1146 break; 1147 1148 default: 1149 kvm_err("lwc2 emulate not impl %d rs %lx @%lx\n", 1150 inst.loongson3_lscsr_format.fr, vcpu->arch.gprs[rs], curr_pc); 1151 er = EMULATE_FAIL; 1152 break; 1153 } 1154 1155 /* Rollback PC only if emulation was unsuccessful */ 1156 if (er == EMULATE_FAIL) { 1157 kvm_err("[%#lx]%s: unsupported lwc2 instruction 0x%08x 0x%08x\n", 1158 curr_pc, __func__, inst.word, inst.loongson3_lscsr_format.fr); 1159 1160 vcpu->arch.pc = curr_pc; 1161 } 1162 1163 return er; 1164 } 1165 #endif 1166 1167 static enum emulation_result kvm_trap_vz_handle_gpsi(u32 cause, u32 *opc, 1168 struct kvm_vcpu *vcpu) 1169 { 1170 enum emulation_result er = EMULATE_DONE; 1171 struct kvm_vcpu_arch *arch = &vcpu->arch; 1172 struct kvm_run *run = vcpu->run; 1173 union mips_instruction inst; 1174 int rd, rt, sel; 1175 int err; 1176 1177 /* 1178 * Fetch the instruction. 
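* (Where the hardware implements the BadInstr register, kvm_get_badinstr() takes the faulting instruction word from there.) 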
1179 */ 1180 if (cause & CAUSEF_BD) 1181 opc += 1; 1182 err = kvm_get_badinstr(opc, vcpu, &inst.word); 1183 if (err) 1184 return EMULATE_FAIL; 1185 1186 switch (inst.r_format.opcode) { 1187 case cop0_op: 1188 er = kvm_vz_gpsi_cop0(inst, opc, cause, run, vcpu); 1189 break; 1190 #ifndef CONFIG_CPU_MIPSR6 1191 case cache_op: 1192 trace_kvm_exit(vcpu, KVM_TRACE_EXIT_CACHE); 1193 er = kvm_vz_gpsi_cache(inst, opc, cause, run, vcpu); 1194 break; 1195 #endif 1196 #ifdef CONFIG_CPU_LOONGSON64 1197 case lwc2_op: 1198 er = kvm_vz_gpsi_lwc2(inst, opc, cause, run, vcpu); 1199 break; 1200 #endif 1201 case spec3_op: 1202 switch (inst.spec3_format.func) { 1203 #ifdef CONFIG_CPU_MIPSR6 1204 case cache6_op: 1205 trace_kvm_exit(vcpu, KVM_TRACE_EXIT_CACHE); 1206 er = kvm_vz_gpsi_cache(inst, opc, cause, run, vcpu); 1207 break; 1208 #endif 1209 case rdhwr_op: 1210 if (inst.r_format.rs || (inst.r_format.re >> 3)) 1211 goto unknown; 1212 1213 rd = inst.r_format.rd; 1214 rt = inst.r_format.rt; 1215 sel = inst.r_format.re & 0x7; 1216 1217 switch (rd) { 1218 case MIPS_HWR_CC: /* Read count register */ 1219 arch->gprs[rt] = 1220 (long)(int)kvm_mips_read_count(vcpu); 1221 break; 1222 default: 1223 trace_kvm_hwr(vcpu, KVM_TRACE_RDHWR, 1224 KVM_TRACE_HWR(rd, sel), 0); 1225 goto unknown; 1226 } 1227 1228 trace_kvm_hwr(vcpu, KVM_TRACE_RDHWR, 1229 KVM_TRACE_HWR(rd, sel), arch->gprs[rt]); 1230 1231 er = update_pc(vcpu, cause); 1232 break; 1233 default: 1234 goto unknown; 1235 } 1236 break; 1237 unknown: 1238 1239 default: 1240 kvm_err("GPSI exception not supported (%p/%#x)\n", 1241 opc, inst.word); 1242 kvm_arch_vcpu_dump_regs(vcpu); 1243 er = EMULATE_FAIL; 1244 break; 1245 } 1246 1247 return er; 1248 } 1249 1250 static enum emulation_result kvm_trap_vz_handle_gsfc(u32 cause, u32 *opc, 1251 struct kvm_vcpu *vcpu) 1252 { 1253 enum emulation_result er = EMULATE_DONE; 1254 struct kvm_vcpu_arch *arch = &vcpu->arch; 1255 union mips_instruction inst; 1256 int err; 1257 1258 /* 1259 * Fetch the instruction. 1260 */ 1261 if (cause & CAUSEF_BD) 1262 opc += 1; 1263 err = kvm_get_badinstr(opc, vcpu, &inst.word); 1264 if (err) 1265 return EMULATE_FAIL; 1266 1267 /* complete MTC0 on behalf of guest and advance EPC */ 1268 if (inst.c0r_format.opcode == cop0_op && 1269 inst.c0r_format.rs == mtc_op && 1270 inst.c0r_format.z == 0) { 1271 int rt = inst.c0r_format.rt; 1272 int rd = inst.c0r_format.rd; 1273 int sel = inst.c0r_format.sel; 1274 unsigned int val = arch->gprs[rt]; 1275 unsigned int old_val, change; 1276 1277 trace_kvm_hwr(vcpu, KVM_TRACE_MTC0, KVM_TRACE_COP0(rd, sel), 1278 val); 1279 1280 if ((rd == MIPS_CP0_STATUS) && (sel == 0)) { 1281 /* FR bit should read as zero if no FPU */ 1282 if (!kvm_mips_guest_has_fpu(&vcpu->arch)) 1283 val &= ~(ST0_CU1 | ST0_FR); 1284 1285 /* 1286 * Also don't allow FR to be set if host doesn't support 1287 * it. 1288 */ 1289 if (!(boot_cpu_data.fpu_id & MIPS_FPIR_F64)) 1290 val &= ~ST0_FR; 1291 1292 old_val = read_gc0_status(); 1293 change = val ^ old_val; 1294 1295 if (change & ST0_FR) { 1296 /* 1297 * FPU and Vector register state is made 1298 * UNPREDICTABLE by a change of FR, so don't 1299 * even bother saving it. 1300 */ 1301 kvm_drop_fpu(vcpu); 1302 } 1303 1304 /* 1305 * If MSA state is already live, it is undefined how it 1306 * interacts with FR=0 FPU state, and we don't want to 1307 * hit reserved instruction exceptions trying to save 1308 * the MSA state later when CU=1 && FR=1, so play it 1309 * safe and save it first. 
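* (kvm_lose_fpu() saves and disables any live FPU/MSA state, so the guest re-faults and reloads it in a consistent mode.) 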
1310 */ 1311 if (change & ST0_CU1 && !(val & ST0_FR) && 1312 vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) 1313 kvm_lose_fpu(vcpu); 1314 1315 write_gc0_status(val); 1316 } else if ((rd == MIPS_CP0_CAUSE) && (sel == 0)) { 1317 u32 old_cause = read_gc0_cause(); 1318 u32 change = old_cause ^ val; 1319 1320 /* DC bit enabling/disabling timer? */ 1321 if (change & CAUSEF_DC) { 1322 if (val & CAUSEF_DC) { 1323 kvm_vz_lose_htimer(vcpu); 1324 kvm_mips_count_disable_cause(vcpu); 1325 } else { 1326 kvm_mips_count_enable_cause(vcpu); 1327 } 1328 } 1329 1330 /* Only certain bits are RW to the guest */ 1331 change &= (CAUSEF_DC | CAUSEF_IV | CAUSEF_WP | 1332 CAUSEF_IP0 | CAUSEF_IP1); 1333 1334 /* WP can only be cleared */ 1335 change &= ~CAUSEF_WP | old_cause; 1336 1337 write_gc0_cause(old_cause ^ change); 1338 } else if ((rd == MIPS_CP0_STATUS) && (sel == 1)) { /* IntCtl */ 1339 write_gc0_intctl(val); 1340 } else if ((rd == MIPS_CP0_CONFIG) && (sel == 5)) { 1341 old_val = read_gc0_config5(); 1342 change = val ^ old_val; 1343 /* Handle changes in FPU/MSA modes */ 1344 preempt_disable(); 1345 1346 /* 1347 * Propagate FRE changes immediately if the FPU 1348 * context is already loaded. 1349 */ 1350 if (change & MIPS_CONF5_FRE && 1351 vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU) 1352 change_c0_config5(MIPS_CONF5_FRE, val); 1353 1354 preempt_enable(); 1355 1356 val = old_val ^ 1357 (change & kvm_vz_config5_guest_wrmask(vcpu)); 1358 write_gc0_config5(val); 1359 } else { 1360 kvm_err("Handle GSFC, unsupported field change @ %p: %#x\n", 1361 opc, inst.word); 1362 er = EMULATE_FAIL; 1363 } 1364 1365 if (er != EMULATE_FAIL) 1366 er = update_pc(vcpu, cause); 1367 } else { 1368 kvm_err("Handle GSFC, unrecognized instruction @ %p: %#x\n", 1369 opc, inst.word); 1370 er = EMULATE_FAIL; 1371 } 1372 1373 return er; 1374 } 1375 1376 static enum emulation_result kvm_trap_vz_handle_ghfc(u32 cause, u32 *opc, 1377 struct kvm_vcpu *vcpu) 1378 { 1379 /* 1380 * Presumably this is due to MC (guest mode change), so let's trace some 1381 * relevant info. 1382 */ 1383 trace_kvm_guest_mode_change(vcpu); 1384 1385 return EMULATE_DONE; 1386 } 1387 1388 static enum emulation_result kvm_trap_vz_handle_hc(u32 cause, u32 *opc, 1389 struct kvm_vcpu *vcpu) 1390 { 1391 enum emulation_result er; 1392 union mips_instruction inst; 1393 unsigned long curr_pc; 1394 int err; 1395 1396 if (cause & CAUSEF_BD) 1397 opc += 1; 1398 err = kvm_get_badinstr(opc, vcpu, &inst.word); 1399 if (err) 1400 return EMULATE_FAIL; 1401 1402 /* 1403 * Update PC and hold onto current PC in case there is 1404 * an error and we want to rollback the PC 1405 */ 1406 curr_pc = vcpu->arch.pc; 1407 er = update_pc(vcpu, cause); 1408 if (er == EMULATE_FAIL) 1409 return er; 1410 1411 er = kvm_mips_emul_hypcall(vcpu, inst); 1412 if (er == EMULATE_FAIL) 1413 vcpu->arch.pc = curr_pc; 1414 1415 return er; 1416 } 1417 1418 static enum emulation_result kvm_trap_vz_no_handler_guest_exit(u32 gexccode, 1419 u32 cause, 1420 u32 *opc, 1421 struct kvm_vcpu *vcpu) 1422 { 1423 u32 inst; 1424 1425 /* 1426 * Fetch the instruction. 
1427 */ 1428 if (cause & CAUSEF_BD) 1429 opc += 1; 1430 kvm_get_badinstr(opc, vcpu, &inst); 1431 1432 kvm_err("Guest Exception Code: %d not yet handled @ PC: %p, inst: 0x%08x Status: %#x\n", 1433 gexccode, opc, inst, read_gc0_status()); 1434 1435 return EMULATE_FAIL; 1436 } 1437 1438 static int kvm_trap_vz_handle_guest_exit(struct kvm_vcpu *vcpu) 1439 { 1440 u32 *opc = (u32 *) vcpu->arch.pc; 1441 u32 cause = vcpu->arch.host_cp0_cause; 1442 enum emulation_result er = EMULATE_DONE; 1443 u32 gexccode = (vcpu->arch.host_cp0_guestctl0 & 1444 MIPS_GCTL0_GEXC) >> MIPS_GCTL0_GEXC_SHIFT; 1445 int ret = RESUME_GUEST; 1446 1447 trace_kvm_exit(vcpu, KVM_TRACE_EXIT_GEXCCODE_BASE + gexccode); 1448 switch (gexccode) { 1449 case MIPS_GCTL0_GEXC_GPSI: 1450 ++vcpu->stat.vz_gpsi_exits; 1451 er = kvm_trap_vz_handle_gpsi(cause, opc, vcpu); 1452 break; 1453 case MIPS_GCTL0_GEXC_GSFC: 1454 ++vcpu->stat.vz_gsfc_exits; 1455 er = kvm_trap_vz_handle_gsfc(cause, opc, vcpu); 1456 break; 1457 case MIPS_GCTL0_GEXC_HC: 1458 ++vcpu->stat.vz_hc_exits; 1459 er = kvm_trap_vz_handle_hc(cause, opc, vcpu); 1460 break; 1461 case MIPS_GCTL0_GEXC_GRR: 1462 ++vcpu->stat.vz_grr_exits; 1463 er = kvm_trap_vz_no_handler_guest_exit(gexccode, cause, opc, 1464 vcpu); 1465 break; 1466 case MIPS_GCTL0_GEXC_GVA: 1467 ++vcpu->stat.vz_gva_exits; 1468 er = kvm_trap_vz_no_handler_guest_exit(gexccode, cause, opc, 1469 vcpu); 1470 break; 1471 case MIPS_GCTL0_GEXC_GHFC: 1472 ++vcpu->stat.vz_ghfc_exits; 1473 er = kvm_trap_vz_handle_ghfc(cause, opc, vcpu); 1474 break; 1475 case MIPS_GCTL0_GEXC_GPA: 1476 ++vcpu->stat.vz_gpa_exits; 1477 er = kvm_trap_vz_no_handler_guest_exit(gexccode, cause, opc, 1478 vcpu); 1479 break; 1480 default: 1481 ++vcpu->stat.vz_resvd_exits; 1482 er = kvm_trap_vz_no_handler_guest_exit(gexccode, cause, opc, 1483 vcpu); 1484 break; 1485 1486 } 1487 1488 if (er == EMULATE_DONE) { 1489 ret = RESUME_GUEST; 1490 } else if (er == EMULATE_HYPERCALL) { 1491 ret = kvm_mips_handle_hypcall(vcpu); 1492 } else { 1493 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; 1494 ret = RESUME_HOST; 1495 } 1496 return ret; 1497 } 1498 1499 /** 1500 * kvm_trap_vz_handle_cop_unusable() - Guest used unusable coprocessor. 1501 * @vcpu: Virtual CPU context. 1502 1503 * Handle when the guest attempts to use a coprocessor which hasn't been allowed 1504 * by the root context. 1505 */ 1506 static int kvm_trap_vz_handle_cop_unusable(struct kvm_vcpu *vcpu) 1507 { 1508 struct kvm_run *run = vcpu->run; 1509 u32 cause = vcpu->arch.host_cp0_cause; 1510 enum emulation_result er = EMULATE_FAIL; 1511 int ret = RESUME_GUEST; 1512 1513 if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 1) { 1514 /* 1515 * If guest FPU not present, the FPU operation should have been 1516 * treated as a reserved instruction! 1517 * If FPU already in use, we shouldn't get this at all. 1518 */ 1519 if (WARN_ON(!kvm_mips_guest_has_fpu(&vcpu->arch) || 1520 vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU)) { 1521 preempt_enable(); 1522 return EMULATE_FAIL; 1523 } 1524 1525 kvm_own_fpu(vcpu); 1526 er = EMULATE_DONE; 1527 } 1528 /* other coprocessors not handled */ 1529 1530 switch (er) { 1531 case EMULATE_DONE: 1532 ret = RESUME_GUEST; 1533 break; 1534 1535 case EMULATE_FAIL: 1536 run->exit_reason = KVM_EXIT_INTERNAL_ERROR; 1537 ret = RESUME_HOST; 1538 break; 1539 1540 default: 1541 BUG(); 1542 } 1543 return ret; 1544 } 1545 1546 /** 1547 * kvm_trap_vz_handle_msa_disabled() - Guest used MSA while disabled in root. 1548 * @vcpu: Virtual CPU context. 
1549 * 1550 * Handle when the guest attempts to use MSA when it is disabled in the root 1551 * context. 1552 */ 1553 static int kvm_trap_vz_handle_msa_disabled(struct kvm_vcpu *vcpu) 1554 { 1555 struct kvm_run *run = vcpu->run; 1556 1557 /* 1558 * If MSA not present or not exposed to guest or FR=0, the MSA operation 1559 * should have been treated as a reserved instruction! 1560 * Same if CU1=1, FR=0. 1561 * If MSA already in use, we shouldn't get this at all. 1562 */ 1563 if (!kvm_mips_guest_has_msa(&vcpu->arch) || 1564 (read_gc0_status() & (ST0_CU1 | ST0_FR)) == ST0_CU1 || 1565 !(read_gc0_config5() & MIPS_CONF5_MSAEN) || 1566 vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) { 1567 run->exit_reason = KVM_EXIT_INTERNAL_ERROR; 1568 return RESUME_HOST; 1569 } 1570 1571 kvm_own_msa(vcpu); 1572 1573 return RESUME_GUEST; 1574 } 1575 1576 static int kvm_trap_vz_handle_tlb_ld_miss(struct kvm_vcpu *vcpu) 1577 { 1578 struct kvm_run *run = vcpu->run; 1579 u32 *opc = (u32 *) vcpu->arch.pc; 1580 u32 cause = vcpu->arch.host_cp0_cause; 1581 ulong badvaddr = vcpu->arch.host_cp0_badvaddr; 1582 union mips_instruction inst; 1583 enum emulation_result er = EMULATE_DONE; 1584 int err, ret = RESUME_GUEST; 1585 1586 if (kvm_mips_handle_vz_root_tlb_fault(badvaddr, vcpu, false)) { 1587 /* A code fetch fault doesn't count as an MMIO */ 1588 if (kvm_is_ifetch_fault(&vcpu->arch)) { 1589 run->exit_reason = KVM_EXIT_INTERNAL_ERROR; 1590 return RESUME_HOST; 1591 } 1592 1593 /* Fetch the instruction */ 1594 if (cause & CAUSEF_BD) 1595 opc += 1; 1596 err = kvm_get_badinstr(opc, vcpu, &inst.word); 1597 if (err) { 1598 run->exit_reason = KVM_EXIT_INTERNAL_ERROR; 1599 return RESUME_HOST; 1600 } 1601 1602 /* Treat as MMIO */ 1603 er = kvm_mips_emulate_load(inst, cause, run, vcpu); 1604 if (er == EMULATE_FAIL) { 1605 kvm_err("Guest Emulate Load from MMIO space failed: PC: %p, BadVaddr: %#lx\n", 1606 opc, badvaddr); 1607 run->exit_reason = KVM_EXIT_INTERNAL_ERROR; 1608 } 1609 } 1610 1611 if (er == EMULATE_DONE) { 1612 ret = RESUME_GUEST; 1613 } else if (er == EMULATE_DO_MMIO) { 1614 run->exit_reason = KVM_EXIT_MMIO; 1615 ret = RESUME_HOST; 1616 } else { 1617 run->exit_reason = KVM_EXIT_INTERNAL_ERROR; 1618 ret = RESUME_HOST; 1619 } 1620 return ret; 1621 } 1622 1623 static int kvm_trap_vz_handle_tlb_st_miss(struct kvm_vcpu *vcpu) 1624 { 1625 struct kvm_run *run = vcpu->run; 1626 u32 *opc = (u32 *) vcpu->arch.pc; 1627 u32 cause = vcpu->arch.host_cp0_cause; 1628 ulong badvaddr = vcpu->arch.host_cp0_badvaddr; 1629 union mips_instruction inst; 1630 enum emulation_result er = EMULATE_DONE; 1631 int err; 1632 int ret = RESUME_GUEST; 1633 1634 /* Just try the access again if we couldn't do the translation */ 1635 if (kvm_vz_badvaddr_to_gpa(vcpu, badvaddr, &badvaddr)) 1636 return RESUME_GUEST; 1637 vcpu->arch.host_cp0_badvaddr = badvaddr; 1638 1639 if (kvm_mips_handle_vz_root_tlb_fault(badvaddr, vcpu, true)) { 1640 /* Fetch the instruction */ 1641 if (cause & CAUSEF_BD) 1642 opc += 1; 1643 err = kvm_get_badinstr(opc, vcpu, &inst.word); 1644 if (err) { 1645 run->exit_reason = KVM_EXIT_INTERNAL_ERROR; 1646 return RESUME_HOST; 1647 } 1648 1649 /* Treat as MMIO */ 1650 er = kvm_mips_emulate_store(inst, cause, run, vcpu); 1651 if (er == EMULATE_FAIL) { 1652 kvm_err("Guest Emulate Store to MMIO space failed: PC: %p, BadVaddr: %#lx\n", 1653 opc, badvaddr); 1654 run->exit_reason = KVM_EXIT_INTERNAL_ERROR; 1655 } 1656 } 1657 1658 if (er == EMULATE_DONE) { 1659 ret = RESUME_GUEST; 1660 } else if (er == EMULATE_DO_MMIO) { 1661 run->exit_reason = 
KVM_EXIT_MMIO; 1662 ret = RESUME_HOST; 1663 } else { 1664 run->exit_reason = KVM_EXIT_INTERNAL_ERROR; 1665 ret = RESUME_HOST; 1666 } 1667 return ret; 1668 } 1669 1670 static u64 kvm_vz_get_one_regs[] = { 1671 KVM_REG_MIPS_CP0_INDEX, 1672 KVM_REG_MIPS_CP0_ENTRYLO0, 1673 KVM_REG_MIPS_CP0_ENTRYLO1, 1674 KVM_REG_MIPS_CP0_CONTEXT, 1675 KVM_REG_MIPS_CP0_PAGEMASK, 1676 KVM_REG_MIPS_CP0_PAGEGRAIN, 1677 KVM_REG_MIPS_CP0_WIRED, 1678 KVM_REG_MIPS_CP0_HWRENA, 1679 KVM_REG_MIPS_CP0_BADVADDR, 1680 KVM_REG_MIPS_CP0_COUNT, 1681 KVM_REG_MIPS_CP0_ENTRYHI, 1682 KVM_REG_MIPS_CP0_COMPARE, 1683 KVM_REG_MIPS_CP0_STATUS, 1684 KVM_REG_MIPS_CP0_INTCTL, 1685 KVM_REG_MIPS_CP0_CAUSE, 1686 KVM_REG_MIPS_CP0_EPC, 1687 KVM_REG_MIPS_CP0_PRID, 1688 KVM_REG_MIPS_CP0_EBASE, 1689 KVM_REG_MIPS_CP0_CONFIG, 1690 KVM_REG_MIPS_CP0_CONFIG1, 1691 KVM_REG_MIPS_CP0_CONFIG2, 1692 KVM_REG_MIPS_CP0_CONFIG3, 1693 KVM_REG_MIPS_CP0_CONFIG4, 1694 KVM_REG_MIPS_CP0_CONFIG5, 1695 #ifdef CONFIG_64BIT 1696 KVM_REG_MIPS_CP0_XCONTEXT, 1697 #endif 1698 KVM_REG_MIPS_CP0_ERROREPC, 1699 1700 KVM_REG_MIPS_COUNT_CTL, 1701 KVM_REG_MIPS_COUNT_RESUME, 1702 KVM_REG_MIPS_COUNT_HZ, 1703 }; 1704 1705 static u64 kvm_vz_get_one_regs_contextconfig[] = { 1706 KVM_REG_MIPS_CP0_CONTEXTCONFIG, 1707 #ifdef CONFIG_64BIT 1708 KVM_REG_MIPS_CP0_XCONTEXTCONFIG, 1709 #endif 1710 }; 1711 1712 static u64 kvm_vz_get_one_regs_segments[] = { 1713 KVM_REG_MIPS_CP0_SEGCTL0, 1714 KVM_REG_MIPS_CP0_SEGCTL1, 1715 KVM_REG_MIPS_CP0_SEGCTL2, 1716 }; 1717 1718 static u64 kvm_vz_get_one_regs_htw[] = { 1719 KVM_REG_MIPS_CP0_PWBASE, 1720 KVM_REG_MIPS_CP0_PWFIELD, 1721 KVM_REG_MIPS_CP0_PWSIZE, 1722 KVM_REG_MIPS_CP0_PWCTL, 1723 }; 1724 1725 static u64 kvm_vz_get_one_regs_kscratch[] = { 1726 KVM_REG_MIPS_CP0_KSCRATCH1, 1727 KVM_REG_MIPS_CP0_KSCRATCH2, 1728 KVM_REG_MIPS_CP0_KSCRATCH3, 1729 KVM_REG_MIPS_CP0_KSCRATCH4, 1730 KVM_REG_MIPS_CP0_KSCRATCH5, 1731 KVM_REG_MIPS_CP0_KSCRATCH6, 1732 }; 1733 1734 static unsigned long kvm_vz_num_regs(struct kvm_vcpu *vcpu) 1735 { 1736 unsigned long ret; 1737 1738 ret = ARRAY_SIZE(kvm_vz_get_one_regs); 1739 if (cpu_guest_has_userlocal) 1740 ++ret; 1741 if (cpu_guest_has_badinstr) 1742 ++ret; 1743 if (cpu_guest_has_badinstrp) 1744 ++ret; 1745 if (cpu_guest_has_contextconfig) 1746 ret += ARRAY_SIZE(kvm_vz_get_one_regs_contextconfig); 1747 if (cpu_guest_has_segments) 1748 ret += ARRAY_SIZE(kvm_vz_get_one_regs_segments); 1749 if (cpu_guest_has_htw || cpu_guest_has_ldpte) 1750 ret += ARRAY_SIZE(kvm_vz_get_one_regs_htw); 1751 if (cpu_guest_has_maar && !cpu_guest_has_dyn_maar) 1752 ret += 1 + ARRAY_SIZE(vcpu->arch.maar); 1753 ret += __arch_hweight8(cpu_data[0].guest.kscratch_mask); 1754 1755 return ret; 1756 } 1757 1758 static int kvm_vz_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices) 1759 { 1760 u64 index; 1761 unsigned int i; 1762 1763 if (copy_to_user(indices, kvm_vz_get_one_regs, 1764 sizeof(kvm_vz_get_one_regs))) 1765 return -EFAULT; 1766 indices += ARRAY_SIZE(kvm_vz_get_one_regs); 1767 1768 if (cpu_guest_has_userlocal) { 1769 index = KVM_REG_MIPS_CP0_USERLOCAL; 1770 if (copy_to_user(indices, &index, sizeof(index))) 1771 return -EFAULT; 1772 ++indices; 1773 } 1774 if (cpu_guest_has_badinstr) { 1775 index = KVM_REG_MIPS_CP0_BADINSTR; 1776 if (copy_to_user(indices, &index, sizeof(index))) 1777 return -EFAULT; 1778 ++indices; 1779 } 1780 if (cpu_guest_has_badinstrp) { 1781 index = KVM_REG_MIPS_CP0_BADINSTRP; 1782 if (copy_to_user(indices, &index, sizeof(index))) 1783 return -EFAULT; 1784 ++indices; 1785 } 1786 if (cpu_guest_has_contextconfig) { 1787 if 
(copy_to_user(indices, kvm_vz_get_one_regs_contextconfig, 1788 sizeof(kvm_vz_get_one_regs_contextconfig))) 1789 return -EFAULT; 1790 indices += ARRAY_SIZE(kvm_vz_get_one_regs_contextconfig); 1791 } 1792 if (cpu_guest_has_segments) { 1793 if (copy_to_user(indices, kvm_vz_get_one_regs_segments, 1794 sizeof(kvm_vz_get_one_regs_segments))) 1795 return -EFAULT; 1796 indices += ARRAY_SIZE(kvm_vz_get_one_regs_segments); 1797 } 1798 if (cpu_guest_has_htw || cpu_guest_has_ldpte) { 1799 if (copy_to_user(indices, kvm_vz_get_one_regs_htw, 1800 sizeof(kvm_vz_get_one_regs_htw))) 1801 return -EFAULT; 1802 indices += ARRAY_SIZE(kvm_vz_get_one_regs_htw); 1803 } 1804 if (cpu_guest_has_maar && !cpu_guest_has_dyn_maar) { 1805 for (i = 0; i < ARRAY_SIZE(vcpu->arch.maar); ++i) { 1806 index = KVM_REG_MIPS_CP0_MAAR(i); 1807 if (copy_to_user(indices, &index, sizeof(index))) 1808 return -EFAULT; 1809 ++indices; 1810 } 1811 1812 index = KVM_REG_MIPS_CP0_MAARI; 1813 if (copy_to_user(indices, &index, sizeof(index))) 1814 return -EFAULT; 1815 ++indices; 1816 } 1817 for (i = 0; i < 6; ++i) { 1818 if (!cpu_guest_has_kscr(i + 2)) 1819 continue; 1820 1821 if (copy_to_user(indices, &kvm_vz_get_one_regs_kscratch[i], 1822 sizeof(kvm_vz_get_one_regs_kscratch[i]))) 1823 return -EFAULT; 1824 ++indices; 1825 } 1826 1827 return 0; 1828 } 1829 1830 static inline s64 entrylo_kvm_to_user(unsigned long v) 1831 { 1832 s64 mask, ret = v; 1833 1834 if (BITS_PER_LONG == 32) { 1835 /* 1836 * KVM API exposes 64-bit version of the register, so move the 1837 * RI/XI bits up into place. 1838 */ 1839 mask = MIPS_ENTRYLO_RI | MIPS_ENTRYLO_XI; 1840 ret &= ~mask; 1841 ret |= ((s64)v & mask) << 32; 1842 } 1843 return ret; 1844 } 1845 1846 static inline unsigned long entrylo_user_to_kvm(s64 v) 1847 { 1848 unsigned long mask, ret = v; 1849 1850 if (BITS_PER_LONG == 32) { 1851 /* 1852 * KVM API exposes 64-bit version of the register, so move the 1853 * RI/XI bits down into place. 
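* For example, RI moves from bit 63 of the user value to MIPS_ENTRYLO_RI (bit 31 on a 32-bit kernel) and XI from bit 62 to bit 30. 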
1854 */ 1855 mask = MIPS_ENTRYLO_RI | MIPS_ENTRYLO_XI; 1856 ret &= ~mask; 1857 ret |= (v >> 32) & mask; 1858 } 1859 return ret; 1860 } 1861 1862 static int kvm_vz_get_one_reg(struct kvm_vcpu *vcpu, 1863 const struct kvm_one_reg *reg, 1864 s64 *v) 1865 { 1866 struct mips_coproc *cop0 = vcpu->arch.cop0; 1867 unsigned int idx; 1868 1869 switch (reg->id) { 1870 case KVM_REG_MIPS_CP0_INDEX: 1871 *v = (long)read_gc0_index(); 1872 break; 1873 case KVM_REG_MIPS_CP0_ENTRYLO0: 1874 *v = entrylo_kvm_to_user(read_gc0_entrylo0()); 1875 break; 1876 case KVM_REG_MIPS_CP0_ENTRYLO1: 1877 *v = entrylo_kvm_to_user(read_gc0_entrylo1()); 1878 break; 1879 case KVM_REG_MIPS_CP0_CONTEXT: 1880 *v = (long)read_gc0_context(); 1881 break; 1882 case KVM_REG_MIPS_CP0_CONTEXTCONFIG: 1883 if (!cpu_guest_has_contextconfig) 1884 return -EINVAL; 1885 *v = read_gc0_contextconfig(); 1886 break; 1887 case KVM_REG_MIPS_CP0_USERLOCAL: 1888 if (!cpu_guest_has_userlocal) 1889 return -EINVAL; 1890 *v = read_gc0_userlocal(); 1891 break; 1892 #ifdef CONFIG_64BIT 1893 case KVM_REG_MIPS_CP0_XCONTEXTCONFIG: 1894 if (!cpu_guest_has_contextconfig) 1895 return -EINVAL; 1896 *v = read_gc0_xcontextconfig(); 1897 break; 1898 #endif 1899 case KVM_REG_MIPS_CP0_PAGEMASK: 1900 *v = (long)read_gc0_pagemask(); 1901 break; 1902 case KVM_REG_MIPS_CP0_PAGEGRAIN: 1903 *v = (long)read_gc0_pagegrain(); 1904 break; 1905 case KVM_REG_MIPS_CP0_SEGCTL0: 1906 if (!cpu_guest_has_segments) 1907 return -EINVAL; 1908 *v = read_gc0_segctl0(); 1909 break; 1910 case KVM_REG_MIPS_CP0_SEGCTL1: 1911 if (!cpu_guest_has_segments) 1912 return -EINVAL; 1913 *v = read_gc0_segctl1(); 1914 break; 1915 case KVM_REG_MIPS_CP0_SEGCTL2: 1916 if (!cpu_guest_has_segments) 1917 return -EINVAL; 1918 *v = read_gc0_segctl2(); 1919 break; 1920 case KVM_REG_MIPS_CP0_PWBASE: 1921 if (!cpu_guest_has_htw && !cpu_guest_has_ldpte) 1922 return -EINVAL; 1923 *v = read_gc0_pwbase(); 1924 break; 1925 case KVM_REG_MIPS_CP0_PWFIELD: 1926 if (!cpu_guest_has_htw && !cpu_guest_has_ldpte) 1927 return -EINVAL; 1928 *v = read_gc0_pwfield(); 1929 break; 1930 case KVM_REG_MIPS_CP0_PWSIZE: 1931 if (!cpu_guest_has_htw && !cpu_guest_has_ldpte) 1932 return -EINVAL; 1933 *v = read_gc0_pwsize(); 1934 break; 1935 case KVM_REG_MIPS_CP0_WIRED: 1936 *v = (long)read_gc0_wired(); 1937 break; 1938 case KVM_REG_MIPS_CP0_PWCTL: 1939 if (!cpu_guest_has_htw && !cpu_guest_has_ldpte) 1940 return -EINVAL; 1941 *v = read_gc0_pwctl(); 1942 break; 1943 case KVM_REG_MIPS_CP0_HWRENA: 1944 *v = (long)read_gc0_hwrena(); 1945 break; 1946 case KVM_REG_MIPS_CP0_BADVADDR: 1947 *v = (long)read_gc0_badvaddr(); 1948 break; 1949 case KVM_REG_MIPS_CP0_BADINSTR: 1950 if (!cpu_guest_has_badinstr) 1951 return -EINVAL; 1952 *v = read_gc0_badinstr(); 1953 break; 1954 case KVM_REG_MIPS_CP0_BADINSTRP: 1955 if (!cpu_guest_has_badinstrp) 1956 return -EINVAL; 1957 *v = read_gc0_badinstrp(); 1958 break; 1959 case KVM_REG_MIPS_CP0_COUNT: 1960 *v = kvm_mips_read_count(vcpu); 1961 break; 1962 case KVM_REG_MIPS_CP0_ENTRYHI: 1963 *v = (long)read_gc0_entryhi(); 1964 break; 1965 case KVM_REG_MIPS_CP0_COMPARE: 1966 *v = (long)read_gc0_compare(); 1967 break; 1968 case KVM_REG_MIPS_CP0_STATUS: 1969 *v = (long)read_gc0_status(); 1970 break; 1971 case KVM_REG_MIPS_CP0_INTCTL: 1972 *v = read_gc0_intctl(); 1973 break; 1974 case KVM_REG_MIPS_CP0_CAUSE: 1975 *v = (long)read_gc0_cause(); 1976 break; 1977 case KVM_REG_MIPS_CP0_EPC: 1978 *v = (long)read_gc0_epc(); 1979 break; 1980 case KVM_REG_MIPS_CP0_PRID: 1981 switch (boot_cpu_type()) { 1982 case CPU_CAVIUM_OCTEON3: 
1983 /* Octeon III has a read-only guest.PRid */ 1984 *v = read_gc0_prid(); 1985 break; 1986 default: 1987 *v = (long)kvm_read_c0_guest_prid(cop0); 1988 break; 1989 } 1990 break; 1991 case KVM_REG_MIPS_CP0_EBASE: 1992 *v = kvm_vz_read_gc0_ebase(); 1993 break; 1994 case KVM_REG_MIPS_CP0_CONFIG: 1995 *v = read_gc0_config(); 1996 break; 1997 case KVM_REG_MIPS_CP0_CONFIG1: 1998 if (!cpu_guest_has_conf1) 1999 return -EINVAL; 2000 *v = read_gc0_config1(); 2001 break; 2002 case KVM_REG_MIPS_CP0_CONFIG2: 2003 if (!cpu_guest_has_conf2) 2004 return -EINVAL; 2005 *v = read_gc0_config2(); 2006 break; 2007 case KVM_REG_MIPS_CP0_CONFIG3: 2008 if (!cpu_guest_has_conf3) 2009 return -EINVAL; 2010 *v = read_gc0_config3(); 2011 break; 2012 case KVM_REG_MIPS_CP0_CONFIG4: 2013 if (!cpu_guest_has_conf4) 2014 return -EINVAL; 2015 *v = read_gc0_config4(); 2016 break; 2017 case KVM_REG_MIPS_CP0_CONFIG5: 2018 if (!cpu_guest_has_conf5) 2019 return -EINVAL; 2020 *v = read_gc0_config5(); 2021 break; 2022 case KVM_REG_MIPS_CP0_MAAR(0) ... KVM_REG_MIPS_CP0_MAAR(0x3f): 2023 if (!cpu_guest_has_maar || cpu_guest_has_dyn_maar) 2024 return -EINVAL; 2025 idx = reg->id - KVM_REG_MIPS_CP0_MAAR(0); 2026 if (idx >= ARRAY_SIZE(vcpu->arch.maar)) 2027 return -EINVAL; 2028 *v = vcpu->arch.maar[idx]; 2029 break; 2030 case KVM_REG_MIPS_CP0_MAARI: 2031 if (!cpu_guest_has_maar || cpu_guest_has_dyn_maar) 2032 return -EINVAL; 2033 *v = kvm_read_sw_gc0_maari(vcpu->arch.cop0); 2034 break; 2035 #ifdef CONFIG_64BIT 2036 case KVM_REG_MIPS_CP0_XCONTEXT: 2037 *v = read_gc0_xcontext(); 2038 break; 2039 #endif 2040 case KVM_REG_MIPS_CP0_ERROREPC: 2041 *v = (long)read_gc0_errorepc(); 2042 break; 2043 case KVM_REG_MIPS_CP0_KSCRATCH1 ... KVM_REG_MIPS_CP0_KSCRATCH6: 2044 idx = reg->id - KVM_REG_MIPS_CP0_KSCRATCH1 + 2; 2045 if (!cpu_guest_has_kscr(idx)) 2046 return -EINVAL; 2047 switch (idx) { 2048 case 2: 2049 *v = (long)read_gc0_kscratch1(); 2050 break; 2051 case 3: 2052 *v = (long)read_gc0_kscratch2(); 2053 break; 2054 case 4: 2055 *v = (long)read_gc0_kscratch3(); 2056 break; 2057 case 5: 2058 *v = (long)read_gc0_kscratch4(); 2059 break; 2060 case 6: 2061 *v = (long)read_gc0_kscratch5(); 2062 break; 2063 case 7: 2064 *v = (long)read_gc0_kscratch6(); 2065 break; 2066 } 2067 break; 2068 case KVM_REG_MIPS_COUNT_CTL: 2069 *v = vcpu->arch.count_ctl; 2070 break; 2071 case KVM_REG_MIPS_COUNT_RESUME: 2072 *v = ktime_to_ns(vcpu->arch.count_resume); 2073 break; 2074 case KVM_REG_MIPS_COUNT_HZ: 2075 *v = vcpu->arch.count_hz; 2076 break; 2077 default: 2078 return -EINVAL; 2079 } 2080 return 0; 2081 } 2082 2083 static int kvm_vz_set_one_reg(struct kvm_vcpu *vcpu, 2084 const struct kvm_one_reg *reg, 2085 s64 v) 2086 { 2087 struct mips_coproc *cop0 = vcpu->arch.cop0; 2088 unsigned int idx; 2089 int ret = 0; 2090 unsigned int cur, change; 2091 2092 switch (reg->id) { 2093 case KVM_REG_MIPS_CP0_INDEX: 2094 write_gc0_index(v); 2095 break; 2096 case KVM_REG_MIPS_CP0_ENTRYLO0: 2097 write_gc0_entrylo0(entrylo_user_to_kvm(v)); 2098 break; 2099 case KVM_REG_MIPS_CP0_ENTRYLO1: 2100 write_gc0_entrylo1(entrylo_user_to_kvm(v)); 2101 break; 2102 case KVM_REG_MIPS_CP0_CONTEXT: 2103 write_gc0_context(v); 2104 break; 2105 case KVM_REG_MIPS_CP0_CONTEXTCONFIG: 2106 if (!cpu_guest_has_contextconfig) 2107 return -EINVAL; 2108 write_gc0_contextconfig(v); 2109 break; 2110 case KVM_REG_MIPS_CP0_USERLOCAL: 2111 if (!cpu_guest_has_userlocal) 2112 return -EINVAL; 2113 write_gc0_userlocal(v); 2114 break; 2115 #ifdef CONFIG_64BIT 2116 case KVM_REG_MIPS_CP0_XCONTEXTCONFIG: 2117 if 
static int kvm_vz_set_one_reg(struct kvm_vcpu *vcpu,
			      const struct kvm_one_reg *reg,
			      s64 v)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	unsigned int idx;
	int ret = 0;
	unsigned int cur, change;

	switch (reg->id) {
	case KVM_REG_MIPS_CP0_INDEX:
		write_gc0_index(v);
		break;
	case KVM_REG_MIPS_CP0_ENTRYLO0:
		write_gc0_entrylo0(entrylo_user_to_kvm(v));
		break;
	case KVM_REG_MIPS_CP0_ENTRYLO1:
		write_gc0_entrylo1(entrylo_user_to_kvm(v));
		break;
	case KVM_REG_MIPS_CP0_CONTEXT:
		write_gc0_context(v);
		break;
	case KVM_REG_MIPS_CP0_CONTEXTCONFIG:
		if (!cpu_guest_has_contextconfig)
			return -EINVAL;
		write_gc0_contextconfig(v);
		break;
	case KVM_REG_MIPS_CP0_USERLOCAL:
		if (!cpu_guest_has_userlocal)
			return -EINVAL;
		write_gc0_userlocal(v);
		break;
#ifdef CONFIG_64BIT
	case KVM_REG_MIPS_CP0_XCONTEXTCONFIG:
		if (!cpu_guest_has_contextconfig)
			return -EINVAL;
		write_gc0_xcontextconfig(v);
		break;
#endif
	case KVM_REG_MIPS_CP0_PAGEMASK:
		write_gc0_pagemask(v);
		break;
	case KVM_REG_MIPS_CP0_PAGEGRAIN:
		write_gc0_pagegrain(v);
		break;
	case KVM_REG_MIPS_CP0_SEGCTL0:
		if (!cpu_guest_has_segments)
			return -EINVAL;
		write_gc0_segctl0(v);
		break;
	case KVM_REG_MIPS_CP0_SEGCTL1:
		if (!cpu_guest_has_segments)
			return -EINVAL;
		write_gc0_segctl1(v);
		break;
	case KVM_REG_MIPS_CP0_SEGCTL2:
		if (!cpu_guest_has_segments)
			return -EINVAL;
		write_gc0_segctl2(v);
		break;
	case KVM_REG_MIPS_CP0_PWBASE:
		if (!cpu_guest_has_htw && !cpu_guest_has_ldpte)
			return -EINVAL;
		write_gc0_pwbase(v);
		break;
	case KVM_REG_MIPS_CP0_PWFIELD:
		if (!cpu_guest_has_htw && !cpu_guest_has_ldpte)
			return -EINVAL;
		write_gc0_pwfield(v);
		break;
	case KVM_REG_MIPS_CP0_PWSIZE:
		if (!cpu_guest_has_htw && !cpu_guest_has_ldpte)
			return -EINVAL;
		write_gc0_pwsize(v);
		break;
	case KVM_REG_MIPS_CP0_WIRED:
		change_gc0_wired(MIPSR6_WIRED_WIRED, v);
		break;
	case KVM_REG_MIPS_CP0_PWCTL:
		if (!cpu_guest_has_htw && !cpu_guest_has_ldpte)
			return -EINVAL;
		write_gc0_pwctl(v);
		break;
	case KVM_REG_MIPS_CP0_HWRENA:
		write_gc0_hwrena(v);
		break;
	case KVM_REG_MIPS_CP0_BADVADDR:
		write_gc0_badvaddr(v);
		break;
	case KVM_REG_MIPS_CP0_BADINSTR:
		if (!cpu_guest_has_badinstr)
			return -EINVAL;
		write_gc0_badinstr(v);
		break;
	case KVM_REG_MIPS_CP0_BADINSTRP:
		if (!cpu_guest_has_badinstrp)
			return -EINVAL;
		write_gc0_badinstrp(v);
		break;
	case KVM_REG_MIPS_CP0_COUNT:
		kvm_mips_write_count(vcpu, v);
		break;
	case KVM_REG_MIPS_CP0_ENTRYHI:
		write_gc0_entryhi(v);
		break;
	case KVM_REG_MIPS_CP0_COMPARE:
		kvm_mips_write_compare(vcpu, v, false);
		break;
	case KVM_REG_MIPS_CP0_STATUS:
		write_gc0_status(v);
		break;
	case KVM_REG_MIPS_CP0_INTCTL:
		write_gc0_intctl(v);
		break;
	case KVM_REG_MIPS_CP0_CAUSE:
		/*
		 * If the timer is stopped or started (DC bit) it must look
		 * atomic with changes to the timer interrupt pending bit (TI).
		 * A timer interrupt should not happen in between.
		 */
		if ((read_gc0_cause() ^ v) & CAUSEF_DC) {
			if (v & CAUSEF_DC) {
				/* disable timer first */
				kvm_mips_count_disable_cause(vcpu);
				change_gc0_cause((u32)~CAUSEF_DC, v);
			} else {
				/* enable timer last */
				change_gc0_cause((u32)~CAUSEF_DC, v);
				kvm_mips_count_enable_cause(vcpu);
			}
		} else {
			write_gc0_cause(v);
		}
		break;
	case KVM_REG_MIPS_CP0_EPC:
		write_gc0_epc(v);
		break;
	case KVM_REG_MIPS_CP0_PRID:
		switch (boot_cpu_type()) {
		case CPU_CAVIUM_OCTEON3:
			/* Octeon III has a guest.PRid, but it's read-only */
			break;
		default:
			kvm_write_c0_guest_prid(cop0, v);
			break;
		}
		break;
	case KVM_REG_MIPS_CP0_EBASE:
		kvm_vz_write_gc0_ebase(v);
		break;
	case KVM_REG_MIPS_CP0_CONFIG:
		cur = read_gc0_config();
		change = (cur ^ v) & kvm_vz_config_user_wrmask(vcpu);
		if (change) {
			v = cur ^ change;
			write_gc0_config(v);
		}
		break;
	case KVM_REG_MIPS_CP0_CONFIG1:
		if (!cpu_guest_has_conf1)
			break;
		cur = read_gc0_config1();
		change = (cur ^ v) & kvm_vz_config1_user_wrmask(vcpu);
		if (change) {
			v = cur ^ change;
			write_gc0_config1(v);
		}
		break;
	case KVM_REG_MIPS_CP0_CONFIG2:
		if (!cpu_guest_has_conf2)
			break;
		cur = read_gc0_config2();
		change = (cur ^ v) & kvm_vz_config2_user_wrmask(vcpu);
		if (change) {
			v = cur ^ change;
			write_gc0_config2(v);
		}
		break;
	case KVM_REG_MIPS_CP0_CONFIG3:
		if (!cpu_guest_has_conf3)
			break;
		cur = read_gc0_config3();
		change = (cur ^ v) & kvm_vz_config3_user_wrmask(vcpu);
		if (change) {
			v = cur ^ change;
			write_gc0_config3(v);
		}
		break;
	case KVM_REG_MIPS_CP0_CONFIG4:
		if (!cpu_guest_has_conf4)
			break;
		cur = read_gc0_config4();
		change = (cur ^ v) & kvm_vz_config4_user_wrmask(vcpu);
		if (change) {
			v = cur ^ change;
			write_gc0_config4(v);
		}
		break;
	case KVM_REG_MIPS_CP0_CONFIG5:
		if (!cpu_guest_has_conf5)
			break;
		cur = read_gc0_config5();
		change = (cur ^ v) & kvm_vz_config5_user_wrmask(vcpu);
		if (change) {
			v = cur ^ change;
			write_gc0_config5(v);
		}
		break;
	case KVM_REG_MIPS_CP0_MAAR(0) ... KVM_REG_MIPS_CP0_MAAR(0x3f):
		if (!cpu_guest_has_maar || cpu_guest_has_dyn_maar)
			return -EINVAL;
		idx = reg->id - KVM_REG_MIPS_CP0_MAAR(0);
		if (idx >= ARRAY_SIZE(vcpu->arch.maar))
			return -EINVAL;
		vcpu->arch.maar[idx] = mips_process_maar(dmtc_op, v);
		break;
	case KVM_REG_MIPS_CP0_MAARI:
		if (!cpu_guest_has_maar || cpu_guest_has_dyn_maar)
			return -EINVAL;
		kvm_write_maari(vcpu, v);
		break;
#ifdef CONFIG_64BIT
	case KVM_REG_MIPS_CP0_XCONTEXT:
		write_gc0_xcontext(v);
		break;
#endif
	case KVM_REG_MIPS_CP0_ERROREPC:
		write_gc0_errorepc(v);
		break;
	case KVM_REG_MIPS_CP0_KSCRATCH1 ... KVM_REG_MIPS_CP0_KSCRATCH6:
		idx = reg->id - KVM_REG_MIPS_CP0_KSCRATCH1 + 2;
		if (!cpu_guest_has_kscr(idx))
			return -EINVAL;
		switch (idx) {
		case 2:
			write_gc0_kscratch1(v);
			break;
		case 3:
			write_gc0_kscratch2(v);
			break;
		case 4:
			write_gc0_kscratch3(v);
			break;
		case 5:
			write_gc0_kscratch4(v);
			break;
		case 6:
			write_gc0_kscratch5(v);
			break;
		case 7:
			write_gc0_kscratch6(v);
			break;
		}
		break;
	case KVM_REG_MIPS_COUNT_CTL:
		ret = kvm_mips_set_count_ctl(vcpu, v);
		break;
	case KVM_REG_MIPS_COUNT_RESUME:
		ret = kvm_mips_set_count_resume(vcpu, v);
		break;
	case KVM_REG_MIPS_COUNT_HZ:
		ret = kvm_mips_set_count_hz(vcpu, v);
		break;
	default:
		return -EINVAL;
	}
	return ret;
}

#define guestid_cache(cpu)	(cpu_data[cpu].guestid_cache)
static void kvm_vz_get_new_guestid(unsigned long cpu, struct kvm_vcpu *vcpu)
{
	unsigned long guestid = guestid_cache(cpu);

	if (!(++guestid & GUESTID_MASK)) {
		if (cpu_has_vtag_icache)
			flush_icache_all();

		if (!guestid)		/* fix version if needed */
			guestid = GUESTID_FIRST_VERSION;

		++guestid;	/* guestid 0 reserved for root */

		/* start new guestid cycle */
		kvm_vz_local_flush_roottlb_all_guests();
		kvm_vz_local_flush_guesttlb_all();
	}

	guestid_cache(cpu) = guestid;
}
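/*
 * Worked example (illustrative): with a hypothetical 8-bit hardware GuestID
 * field, GUESTID_MASK == 0xff and GUESTID_FIRST_VERSION == 0x100. The cache
 * then hands out 0x101, 0x102, ... 0x1ff; the next allocation wraps the low
 * bits to zero, so both TLBs are flushed, 0x200 is skipped (low bits of 0
 * would collide with the root GuestID) and 0x201 is handed out. A vcpu whose
 * stored value carries old upper (version) bits is thereby recognised as
 * stale without any per-vcpu bookkeeping.
 */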
2396 */ 2397 } 2398 2399 return ret; 2400 } 2401 2402 static void kvm_vz_vcpu_save_wired(struct kvm_vcpu *vcpu) 2403 { 2404 unsigned int wired = read_gc0_wired(); 2405 struct kvm_mips_tlb *tlbs; 2406 int i; 2407 2408 /* Expand the wired TLB array if necessary */ 2409 wired &= MIPSR6_WIRED_WIRED; 2410 if (wired > vcpu->arch.wired_tlb_limit) { 2411 tlbs = krealloc(vcpu->arch.wired_tlb, wired * 2412 sizeof(*vcpu->arch.wired_tlb), GFP_ATOMIC); 2413 if (WARN_ON(!tlbs)) { 2414 /* Save whatever we can */ 2415 wired = vcpu->arch.wired_tlb_limit; 2416 } else { 2417 vcpu->arch.wired_tlb = tlbs; 2418 vcpu->arch.wired_tlb_limit = wired; 2419 } 2420 } 2421 2422 if (wired) 2423 /* Save wired entries from the guest TLB */ 2424 kvm_vz_save_guesttlb(vcpu->arch.wired_tlb, 0, wired); 2425 /* Invalidate any dropped entries since last time */ 2426 for (i = wired; i < vcpu->arch.wired_tlb_used; ++i) { 2427 vcpu->arch.wired_tlb[i].tlb_hi = UNIQUE_GUEST_ENTRYHI(i); 2428 vcpu->arch.wired_tlb[i].tlb_lo[0] = 0; 2429 vcpu->arch.wired_tlb[i].tlb_lo[1] = 0; 2430 vcpu->arch.wired_tlb[i].tlb_mask = 0; 2431 } 2432 vcpu->arch.wired_tlb_used = wired; 2433 } 2434 2435 static void kvm_vz_vcpu_load_wired(struct kvm_vcpu *vcpu) 2436 { 2437 /* Load wired entries into the guest TLB */ 2438 if (vcpu->arch.wired_tlb) 2439 kvm_vz_load_guesttlb(vcpu->arch.wired_tlb, 0, 2440 vcpu->arch.wired_tlb_used); 2441 } 2442 2443 static void kvm_vz_vcpu_load_tlb(struct kvm_vcpu *vcpu, int cpu) 2444 { 2445 struct kvm *kvm = vcpu->kvm; 2446 struct mm_struct *gpa_mm = &kvm->arch.gpa_mm; 2447 bool migrated; 2448 2449 /* 2450 * Are we entering guest context on a different CPU to last time? 2451 * If so, the VCPU's guest TLB state on this CPU may be stale. 2452 */ 2453 migrated = (vcpu->arch.last_exec_cpu != cpu); 2454 vcpu->arch.last_exec_cpu = cpu; 2455 2456 /* 2457 * A vcpu's GuestID is set in GuestCtl1.ID when the vcpu is loaded and 2458 * remains set until another vcpu is loaded in. As a rule GuestRID 2459 * remains zeroed when in root context unless the kernel is busy 2460 * manipulating guest tlb entries. 2461 */ 2462 if (cpu_has_guestid) { 2463 /* 2464 * Check if our GuestID is of an older version and thus invalid. 2465 * 2466 * We also discard the stored GuestID if we've executed on 2467 * another CPU, as the guest mappings may have changed without 2468 * hypervisor knowledge. 2469 */ 2470 if (migrated || 2471 (vcpu->arch.vzguestid[cpu] ^ guestid_cache(cpu)) & 2472 GUESTID_VERSION_MASK) { 2473 kvm_vz_get_new_guestid(cpu, vcpu); 2474 vcpu->arch.vzguestid[cpu] = guestid_cache(cpu); 2475 trace_kvm_guestid_change(vcpu, 2476 vcpu->arch.vzguestid[cpu]); 2477 } 2478 2479 /* Restore GuestID */ 2480 change_c0_guestctl1(GUESTID_MASK, vcpu->arch.vzguestid[cpu]); 2481 } else { 2482 /* 2483 * The Guest TLB only stores a single guest's TLB state, so 2484 * flush it if another VCPU has executed on this CPU. 2485 * 2486 * We also flush if we've executed on another CPU, as the guest 2487 * mappings may have changed without hypervisor knowledge. 2488 */ 2489 if (migrated || last_exec_vcpu[cpu] != vcpu) 2490 kvm_vz_local_flush_guesttlb_all(); 2491 last_exec_vcpu[cpu] = vcpu; 2492 2493 /* 2494 * Root ASID dealiases guest GPA mappings in the root TLB. 2495 * Allocate new root ASID if needed. 
static void kvm_vz_vcpu_load_tlb(struct kvm_vcpu *vcpu, int cpu)
{
	struct kvm *kvm = vcpu->kvm;
	struct mm_struct *gpa_mm = &kvm->arch.gpa_mm;
	bool migrated;

	/*
	 * Are we entering guest context on a different CPU than last time?
	 * If so, the VCPU's guest TLB state on this CPU may be stale.
	 */
	migrated = (vcpu->arch.last_exec_cpu != cpu);
	vcpu->arch.last_exec_cpu = cpu;

	/*
	 * A vcpu's GuestID is set in GuestCtl1.ID when the vcpu is loaded and
	 * remains set until another vcpu is loaded in. As a rule GuestRID
	 * remains zeroed when in root context unless the kernel is busy
	 * manipulating guest TLB entries.
	 */
	if (cpu_has_guestid) {
		/*
		 * Check if our GuestID is of an older version and thus invalid.
		 *
		 * We also discard the stored GuestID if we've executed on
		 * another CPU, as the guest mappings may have changed without
		 * hypervisor knowledge.
		 */
		if (migrated ||
		    (vcpu->arch.vzguestid[cpu] ^ guestid_cache(cpu)) &
					GUESTID_VERSION_MASK) {
			kvm_vz_get_new_guestid(cpu, vcpu);
			vcpu->arch.vzguestid[cpu] = guestid_cache(cpu);
			trace_kvm_guestid_change(vcpu,
						 vcpu->arch.vzguestid[cpu]);
		}

		/* Restore GuestID */
		change_c0_guestctl1(GUESTID_MASK, vcpu->arch.vzguestid[cpu]);
	} else {
		/*
		 * The Guest TLB only stores a single guest's TLB state, so
		 * flush it if another VCPU has executed on this CPU.
		 *
		 * We also flush if we've executed on another CPU, as the guest
		 * mappings may have changed without hypervisor knowledge.
		 */
		if (migrated || last_exec_vcpu[cpu] != vcpu)
			kvm_vz_local_flush_guesttlb_all();
		last_exec_vcpu[cpu] = vcpu;

		/*
		 * Root ASID dealiases guest GPA mappings in the root TLB.
		 * Allocate new root ASID if needed.
		 */
		if (cpumask_test_and_clear_cpu(cpu, &kvm->arch.asid_flush_mask))
			get_new_mmu_context(gpa_mm);
		else
			check_mmu_context(gpa_mm);
	}
}
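/*
 * Worked example (illustrative, reusing the hypothetical 8-bit GuestID
 * layout from the earlier example): suppose vcpu->arch.vzguestid[cpu] is
 * 0x1a3 while guestid_cache(cpu) has advanced to 0x2b7. The staleness test
 * above computes
 *
 *	(0x1a3 ^ 0x2b7) & GUESTID_VERSION_MASK == 0x314 & ~0xff == 0x300
 *
 * which is non-zero, so the stored GuestID predates the last flush cycle
 * and a fresh one is allocated before GuestCtl1.ID is updated.
 */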
static int kvm_vz_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	bool migrated, all;

	/*
	 * Have we migrated to a different CPU?
	 * If so, any old guest TLB state may be stale.
	 */
	migrated = (vcpu->arch.last_sched_cpu != cpu);

	/*
	 * Was this the last VCPU to run on this CPU?
	 * If not, any old guest state from this VCPU will have been clobbered.
	 */
	all = migrated || (last_vcpu[cpu] != vcpu);
	last_vcpu[cpu] = vcpu;

	/*
	 * Restore CP0_Wired unconditionally as we clear it after use, and
	 * restore wired guest TLB entries (while in guest context).
	 */
	kvm_restore_gc0_wired(cop0);
	if (current->flags & PF_VCPU) {
		tlbw_use_hazard();
		kvm_vz_vcpu_load_tlb(vcpu, cpu);
		kvm_vz_vcpu_load_wired(vcpu);
	}

	/*
	 * Restore timer state regardless, as e.g. Cause.TI can change over
	 * time if left unmaintained.
	 */
	kvm_vz_restore_timer(vcpu);

	/* Set MC bit if we want to trace guest mode changes */
	if (kvm_trace_guest_mode_change)
		set_c0_guestctl0(MIPS_GCTL0_MC);
	else
		clear_c0_guestctl0(MIPS_GCTL0_MC);

	/* Don't bother restoring registers multiple times unless necessary */
	if (!all)
		return 0;

	/*
	 * Restore config registers first, as some implementations restrict
	 * writes to other registers when the corresponding feature bits aren't
	 * set. For example Status.CU1 cannot be set unless Config1.FP is set.
	 */
	kvm_restore_gc0_config(cop0);
	if (cpu_guest_has_conf1)
		kvm_restore_gc0_config1(cop0);
	if (cpu_guest_has_conf2)
		kvm_restore_gc0_config2(cop0);
	if (cpu_guest_has_conf3)
		kvm_restore_gc0_config3(cop0);
	if (cpu_guest_has_conf4)
		kvm_restore_gc0_config4(cop0);
	if (cpu_guest_has_conf5)
		kvm_restore_gc0_config5(cop0);
	if (cpu_guest_has_conf6)
		kvm_restore_gc0_config6(cop0);
	if (cpu_guest_has_conf7)
		kvm_restore_gc0_config7(cop0);

	kvm_restore_gc0_index(cop0);
	kvm_restore_gc0_entrylo0(cop0);
	kvm_restore_gc0_entrylo1(cop0);
	kvm_restore_gc0_context(cop0);
	if (cpu_guest_has_contextconfig)
		kvm_restore_gc0_contextconfig(cop0);
#ifdef CONFIG_64BIT
	kvm_restore_gc0_xcontext(cop0);
	if (cpu_guest_has_contextconfig)
		kvm_restore_gc0_xcontextconfig(cop0);
#endif
	kvm_restore_gc0_pagemask(cop0);
	kvm_restore_gc0_pagegrain(cop0);
	kvm_restore_gc0_hwrena(cop0);
	kvm_restore_gc0_badvaddr(cop0);
	kvm_restore_gc0_entryhi(cop0);
	kvm_restore_gc0_status(cop0);
	kvm_restore_gc0_intctl(cop0);
	kvm_restore_gc0_epc(cop0);
	kvm_vz_write_gc0_ebase(kvm_read_sw_gc0_ebase(cop0));
	if (cpu_guest_has_userlocal)
		kvm_restore_gc0_userlocal(cop0);

	kvm_restore_gc0_errorepc(cop0);

	/* restore KScratch registers if enabled in guest */
	if (cpu_guest_has_conf4) {
		if (cpu_guest_has_kscr(2))
			kvm_restore_gc0_kscratch1(cop0);
		if (cpu_guest_has_kscr(3))
			kvm_restore_gc0_kscratch2(cop0);
		if (cpu_guest_has_kscr(4))
			kvm_restore_gc0_kscratch3(cop0);
		if (cpu_guest_has_kscr(5))
			kvm_restore_gc0_kscratch4(cop0);
		if (cpu_guest_has_kscr(6))
			kvm_restore_gc0_kscratch5(cop0);
		if (cpu_guest_has_kscr(7))
			kvm_restore_gc0_kscratch6(cop0);
	}

	if (cpu_guest_has_badinstr)
		kvm_restore_gc0_badinstr(cop0);
	if (cpu_guest_has_badinstrp)
		kvm_restore_gc0_badinstrp(cop0);

	if (cpu_guest_has_segments) {
		kvm_restore_gc0_segctl0(cop0);
		kvm_restore_gc0_segctl1(cop0);
		kvm_restore_gc0_segctl2(cop0);
	}

	/* restore HTW registers */
	if (cpu_guest_has_htw || cpu_guest_has_ldpte) {
		kvm_restore_gc0_pwbase(cop0);
		kvm_restore_gc0_pwfield(cop0);
		kvm_restore_gc0_pwsize(cop0);
		kvm_restore_gc0_pwctl(cop0);
	}

	/* restore Root.GuestCtl2 from unused Guest guestctl2 register */
	if (cpu_has_guestctl2)
		write_c0_guestctl2(
			cop0->reg[MIPS_CP0_GUESTCTL2][MIPS_CP0_GUESTCTL2_SEL]);

	/*
	 * We should clear the linked load bit to break interrupted atomics.
	 * This prevents an SC on the next VCPU from succeeding by matching an
	 * LL on the previous VCPU.
	 */
	if (cpu_guest_has_rw_llb)
		write_gc0_lladdr(0);

	return 0;
}
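/*
 * Guest-side sequence motivating the LLAddr clear above (illustrative
 * sketch, not from this file): vcpu A executes "ll t0, (a0)" and is then
 * preempted; vcpu B runs on the same core and completes its own ll/sc pair.
 * Without zeroing LLAddr/LLbit on switch-in, vcpu A's pending
 * "sc t1, (a0)" could succeed against B's reservation and silently corrupt
 * the atomic sequence. Clearing the reservation forces A's sc to fail, so
 * the guest's usual retry loop re-executes the ll.
 */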
static int kvm_vz_vcpu_put(struct kvm_vcpu *vcpu, int cpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;

	if (current->flags & PF_VCPU)
		kvm_vz_vcpu_save_wired(vcpu);

	kvm_lose_fpu(vcpu);

	kvm_save_gc0_index(cop0);
	kvm_save_gc0_entrylo0(cop0);
	kvm_save_gc0_entrylo1(cop0);
	kvm_save_gc0_context(cop0);
	if (cpu_guest_has_contextconfig)
		kvm_save_gc0_contextconfig(cop0);
#ifdef CONFIG_64BIT
	kvm_save_gc0_xcontext(cop0);
	if (cpu_guest_has_contextconfig)
		kvm_save_gc0_xcontextconfig(cop0);
#endif
	kvm_save_gc0_pagemask(cop0);
	kvm_save_gc0_pagegrain(cop0);
	kvm_save_gc0_wired(cop0);
	/* allow wired TLB entries to be overwritten */
	clear_gc0_wired(MIPSR6_WIRED_WIRED);
	kvm_save_gc0_hwrena(cop0);
	kvm_save_gc0_badvaddr(cop0);
	kvm_save_gc0_entryhi(cop0);
	kvm_save_gc0_status(cop0);
	kvm_save_gc0_intctl(cop0);
	kvm_save_gc0_epc(cop0);
	kvm_write_sw_gc0_ebase(cop0, kvm_vz_read_gc0_ebase());
	if (cpu_guest_has_userlocal)
		kvm_save_gc0_userlocal(cop0);

	/* only save implemented config registers */
	kvm_save_gc0_config(cop0);
	if (cpu_guest_has_conf1)
		kvm_save_gc0_config1(cop0);
	if (cpu_guest_has_conf2)
		kvm_save_gc0_config2(cop0);
	if (cpu_guest_has_conf3)
		kvm_save_gc0_config3(cop0);
	if (cpu_guest_has_conf4)
		kvm_save_gc0_config4(cop0);
	if (cpu_guest_has_conf5)
		kvm_save_gc0_config5(cop0);
	if (cpu_guest_has_conf6)
		kvm_save_gc0_config6(cop0);
	if (cpu_guest_has_conf7)
		kvm_save_gc0_config7(cop0);

	kvm_save_gc0_errorepc(cop0);

	/* save KScratch registers if enabled in guest */
	if (cpu_guest_has_conf4) {
		if (cpu_guest_has_kscr(2))
			kvm_save_gc0_kscratch1(cop0);
		if (cpu_guest_has_kscr(3))
			kvm_save_gc0_kscratch2(cop0);
		if (cpu_guest_has_kscr(4))
			kvm_save_gc0_kscratch3(cop0);
		if (cpu_guest_has_kscr(5))
			kvm_save_gc0_kscratch4(cop0);
		if (cpu_guest_has_kscr(6))
			kvm_save_gc0_kscratch5(cop0);
		if (cpu_guest_has_kscr(7))
			kvm_save_gc0_kscratch6(cop0);
	}

	if (cpu_guest_has_badinstr)
		kvm_save_gc0_badinstr(cop0);
	if (cpu_guest_has_badinstrp)
		kvm_save_gc0_badinstrp(cop0);

	if (cpu_guest_has_segments) {
		kvm_save_gc0_segctl0(cop0);
		kvm_save_gc0_segctl1(cop0);
		kvm_save_gc0_segctl2(cop0);
	}

	/* save HTW registers if enabled in guest */
	if (cpu_guest_has_ldpte || (cpu_guest_has_htw &&
	    kvm_read_sw_gc0_config3(cop0) & MIPS_CONF3_PW)) {
		kvm_save_gc0_pwbase(cop0);
		kvm_save_gc0_pwfield(cop0);
		kvm_save_gc0_pwsize(cop0);
		kvm_save_gc0_pwctl(cop0);
	}

	kvm_vz_save_timer(vcpu);

	/* save Root.GuestCtl2 in unused Guest guestctl2 register */
	if (cpu_has_guestctl2)
		cop0->reg[MIPS_CP0_GUESTCTL2][MIPS_CP0_GUESTCTL2_SEL] =
			read_c0_guestctl2();

	return 0;
}

/**
 * kvm_vz_resize_guest_vtlb() - Attempt to resize guest VTLB.
 * @size:	Number of guest VTLB entries (0 < @size <= root VTLB entries).
 *
 * Attempt to resize the guest VTLB by writing guest Config registers. This is
 * necessary for cores with a shared root/guest TLB to avoid overlap with wired
 * entries in the root VTLB.
 *
 * Returns:	The resulting guest VTLB size.
 */
static unsigned int kvm_vz_resize_guest_vtlb(unsigned int size)
{
	unsigned int config4 = 0, ret = 0, limit;

	/* Write MMUSize - 1 into guest Config registers */
	if (cpu_guest_has_conf1)
		change_gc0_config1(MIPS_CONF1_TLBS,
				   (size - 1) << MIPS_CONF1_TLBS_SHIFT);
	if (cpu_guest_has_conf4) {
		config4 = read_gc0_config4();
		if (cpu_has_mips_r6 || (config4 & MIPS_CONF4_MMUEXTDEF) ==
		    MIPS_CONF4_MMUEXTDEF_VTLBSIZEEXT) {
			config4 &= ~MIPS_CONF4_VTLBSIZEEXT;
			config4 |= ((size - 1) >> MIPS_CONF1_TLBS_SIZE) <<
				MIPS_CONF4_VTLBSIZEEXT_SHIFT;
		} else if ((config4 & MIPS_CONF4_MMUEXTDEF) ==
			   MIPS_CONF4_MMUEXTDEF_MMUSIZEEXT) {
			config4 &= ~MIPS_CONF4_MMUSIZEEXT;
			config4 |= ((size - 1) >> MIPS_CONF1_TLBS_SIZE) <<
				MIPS_CONF4_MMUSIZEEXT_SHIFT;
		}
		write_gc0_config4(config4);
	}

	/*
	 * Set Guest.Wired.Limit = 0 (no limit up to Guest.MMUSize-1), unless
	 * it would exceed Root.Wired.Limit (clearing Guest.Wired.Wired so the
	 * write is not dropped).
	 */
	if (cpu_has_mips_r6) {
		limit = (read_c0_wired() & MIPSR6_WIRED_LIMIT) >>
			MIPSR6_WIRED_LIMIT_SHIFT;
		if (size - 1 <= limit)
			limit = 0;
		write_gc0_wired(limit << MIPSR6_WIRED_LIMIT_SHIFT);
	}

	/* Read back MMUSize - 1 */
	back_to_back_c0_hazard();
	if (cpu_guest_has_conf1)
		ret = (read_gc0_config1() & MIPS_CONF1_TLBS) >>
			MIPS_CONF1_TLBS_SHIFT;
	if (config4) {
		if (cpu_has_mips_r6 || (config4 & MIPS_CONF4_MMUEXTDEF) ==
		    MIPS_CONF4_MMUEXTDEF_VTLBSIZEEXT)
			ret |= ((config4 & MIPS_CONF4_VTLBSIZEEXT) >>
				MIPS_CONF4_VTLBSIZEEXT_SHIFT) <<
				MIPS_CONF1_TLBS_SIZE;
		else if ((config4 & MIPS_CONF4_MMUEXTDEF) ==
			 MIPS_CONF4_MMUEXTDEF_MMUSIZEEXT)
			ret |= ((config4 & MIPS_CONF4_MMUSIZEEXT) >>
				MIPS_CONF4_MMUSIZEEXT_SHIFT) <<
				MIPS_CONF1_TLBS_SIZE;
	}
	return ret + 1;
}
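/*
 * Worked example (illustrative): requesting size == 80 writes
 * MMUSize - 1 == 79 == 0b1001111. Config1.MMUSize holds only
 * MIPS_CONF1_TLBS_SIZE (6) bits, so it receives 0b001111 (15) and the
 * overflow bit 0b1 lands in Config4.VTLBSizeExt (or MMUSizeExt, depending
 * on Config4.MMUExtDef). The read-back above reassembles
 * (1 << 6) | 15 == 79 and returns 79 + 1 == 80, or less if the core
 * clamped the write.
 */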
static int kvm_vz_hardware_enable(void)
{
	unsigned int mmu_size, guest_mmu_size, ftlb_size;
	u64 guest_cvmctl, cvmvmconfig;

	switch (current_cpu_type()) {
	case CPU_CAVIUM_OCTEON3:
		/* Set up guest timer/perfcount IRQ lines */
		guest_cvmctl = read_gc0_cvmctl();
		guest_cvmctl &= ~CVMCTL_IPTI;
		guest_cvmctl |= 7ull << CVMCTL_IPTI_SHIFT;
		guest_cvmctl &= ~CVMCTL_IPPCI;
		guest_cvmctl |= 6ull << CVMCTL_IPPCI_SHIFT;
		write_gc0_cvmctl(guest_cvmctl);

		cvmvmconfig = read_c0_cvmvmconfig();
		/* No I/O hole translation. */
		cvmvmconfig |= CVMVMCONF_DGHT;
		/* Halve the root MMU size */
		mmu_size = ((cvmvmconfig & CVMVMCONF_MMUSIZEM1)
			    >> CVMVMCONF_MMUSIZEM1_S) + 1;
		guest_mmu_size = mmu_size / 2;
		mmu_size -= guest_mmu_size;
		cvmvmconfig &= ~CVMVMCONF_RMMUSIZEM1;
		cvmvmconfig |= mmu_size - 1;
		write_c0_cvmvmconfig(cvmvmconfig);

		/* Update our records */
		current_cpu_data.tlbsize = mmu_size;
		current_cpu_data.tlbsizevtlb = mmu_size;
		current_cpu_data.guest.tlbsize = guest_mmu_size;

		/* Flush moved entries in new (guest) context */
		kvm_vz_local_flush_guesttlb_all();
		break;
	default:
		/*
		 * ImgTec cores tend to use a shared root/guest TLB. To avoid
		 * overlap of root wired and guest entries, the guest TLB may
		 * need resizing.
		 */
		mmu_size = current_cpu_data.tlbsizevtlb;
		ftlb_size = current_cpu_data.tlbsize - mmu_size;

		/* Try switching to maximum guest VTLB size for flush */
		guest_mmu_size = kvm_vz_resize_guest_vtlb(mmu_size);
		current_cpu_data.guest.tlbsize = guest_mmu_size + ftlb_size;
		kvm_vz_local_flush_guesttlb_all();

		/*
		 * Reduce to make space for root wired entries and at least 2
		 * root non-wired entries. This does assume that long-term
		 * wired entries won't be added later.
		 */
		guest_mmu_size = mmu_size - num_wired_entries() - 2;
		guest_mmu_size = kvm_vz_resize_guest_vtlb(guest_mmu_size);
		current_cpu_data.guest.tlbsize = guest_mmu_size + ftlb_size;

		/*
		 * Write the VTLB size, but if another CPU has already written,
		 * check it matches or we won't provide a consistent view to
		 * the guest. If this ever happens it suggests an asymmetric
		 * number of wired entries.
		 */
		if (cmpxchg(&kvm_vz_guest_vtlb_size, 0, guest_mmu_size) &&
		    WARN(guest_mmu_size != kvm_vz_guest_vtlb_size,
			 "Available guest VTLB size mismatch"))
			return -EINVAL;
		break;
	}

	/*
	 * Enable virtualization features granting guest direct control of
	 * certain features:
	 *	CP0=1:		Guest coprocessor 0 context.
	 *	AT=Guest:	Guest MMU.
	 *	CG=1:		Hit (virtual address) CACHE operations (optional).
	 *	CF=1:		Guest Config registers.
	 *	CGI=1:		Indexed flush CACHE operations (optional).
	 */
	write_c0_guestctl0(MIPS_GCTL0_CP0 |
			   (MIPS_GCTL0_AT_GUEST << MIPS_GCTL0_AT_SHIFT) |
			   MIPS_GCTL0_CG | MIPS_GCTL0_CF);
	if (cpu_has_guestctl0ext) {
		if (current_cpu_type() != CPU_LOONGSON64)
			set_c0_guestctl0ext(MIPS_GCTL0EXT_CGI);
		else
			clear_c0_guestctl0ext(MIPS_GCTL0EXT_CGI);
	}

	if (cpu_has_guestid) {
		write_c0_guestctl1(0);
		kvm_vz_local_flush_roottlb_all_guests();

		GUESTID_MASK = current_cpu_data.guestid_mask;
		GUESTID_FIRST_VERSION = GUESTID_MASK + 1;
		GUESTID_VERSION_MASK = ~GUESTID_MASK;

		current_cpu_data.guestid_cache = GUESTID_FIRST_VERSION;
	}

	/* clear any pending injected virtual guest interrupts */
	if (cpu_has_guestctl2)
		clear_c0_guestctl2(0x3f << 10);

#ifdef CONFIG_CPU_LOONGSON64
	/* Control guest CCA attribute */
	if (cpu_has_csr())
		csr_writel(csr_readl(0xffffffec) | 0x1, 0xffffffec);
#endif

	return 0;
}
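/*
 * Illustrative note: the cmpxchg() in the shared-TLB path above implements a
 * "first CPU to come up publishes the value, later CPUs only verify"
 * pattern. Assuming two CPUs with 64-entry VTLBs and 6 wired root entries
 * each:
 *
 *	CPU0: cmpxchg(&kvm_vz_guest_vtlb_size, 0, 56) returns 0  -> publish
 *	CPU1: cmpxchg(...) returns 56 (non-zero)                 -> compare
 *
 * Only if CPU1 computed a different size (asymmetric wired usage) does the
 * WARN fire and hardware enable fail, keeping the guest-visible MMU size
 * consistent across the system.
 */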
static void kvm_vz_hardware_disable(void)
{
	u64 cvmvmconfig;
	unsigned int mmu_size;

	/* Flush any remaining guest TLB entries */
	kvm_vz_local_flush_guesttlb_all();

	switch (current_cpu_type()) {
	case CPU_CAVIUM_OCTEON3:
		/*
		 * Allocate whole TLB for root. Existing guest TLB entries will
		 * change ownership to the root TLB. We should be safe though
		 * as they've already been flushed above while in guest TLB.
		 */
		cvmvmconfig = read_c0_cvmvmconfig();
		mmu_size = ((cvmvmconfig & CVMVMCONF_MMUSIZEM1)
			    >> CVMVMCONF_MMUSIZEM1_S) + 1;
		cvmvmconfig &= ~CVMVMCONF_RMMUSIZEM1;
		cvmvmconfig |= mmu_size - 1;
		write_c0_cvmvmconfig(cvmvmconfig);

		/* Update our records */
		current_cpu_data.tlbsize = mmu_size;
		current_cpu_data.tlbsizevtlb = mmu_size;
		current_cpu_data.guest.tlbsize = 0;

		/* Flush moved entries in new (root) context */
		local_flush_tlb_all();
		break;
	}

	if (cpu_has_guestid) {
		write_c0_guestctl1(0);
		kvm_vz_local_flush_roottlb_all_guests();
	}
}

static int kvm_vz_check_extension(struct kvm *kvm, long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_MIPS_VZ:
		/* we wouldn't be here unless cpu_has_vz */
		r = 1;
		break;
#ifdef CONFIG_64BIT
	case KVM_CAP_MIPS_64BIT:
		/* We support 64-bit registers/operations and addresses */
		r = 2;
		break;
#endif
	case KVM_CAP_IOEVENTFD:
		r = 1;
		break;
	default:
		r = 0;
		break;
	}

	return r;
}

static int kvm_vz_vcpu_init(struct kvm_vcpu *vcpu)
{
	int i;

	for_each_possible_cpu(i)
		vcpu->arch.vzguestid[i] = 0;

	return 0;
}
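/*
 * Usage sketch (illustrative, userspace side): the values returned above are
 * what a VMM observes through KVM_CHECK_EXTENSION, e.g.:
 *
 *	int r = ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_MIPS_64BIT);
 *
 * where r == 2 indicates 64-bit registers, operations and addresses, and
 * r == 0 would indicate a 32-bit-only host.
 */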
static void kvm_vz_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	int cpu;

	/*
	 * If the VCPU is freed and reused as another VCPU, we don't want the
	 * matching pointer wrongly hanging around in last_vcpu[] or
	 * last_exec_vcpu[].
	 */
	for_each_possible_cpu(cpu) {
		if (last_vcpu[cpu] == vcpu)
			last_vcpu[cpu] = NULL;
		if (last_exec_vcpu[cpu] == vcpu)
			last_exec_vcpu[cpu] = NULL;
	}
}

static int kvm_vz_vcpu_setup(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	unsigned long count_hz = 100*1000*1000; /* default to 100 MHz */

	/*
	 * Start off the timer at the same frequency as the host timer, but the
	 * soft timer doesn't handle frequencies greater than 1GHz yet.
	 */
	if (mips_hpt_frequency && mips_hpt_frequency <= NSEC_PER_SEC)
		count_hz = mips_hpt_frequency;
	kvm_mips_init_count(vcpu, count_hz);

	/*
	 * Initialize guest register state to valid architectural reset state.
	 */

	/* PageGrain */
	if (cpu_has_mips_r5 || cpu_has_mips_r6)
		kvm_write_sw_gc0_pagegrain(cop0, PG_RIE | PG_XIE | PG_IEC);
	/* Wired */
	if (cpu_has_mips_r6)
		kvm_write_sw_gc0_wired(cop0,
				       read_gc0_wired() & MIPSR6_WIRED_LIMIT);
	/* Status */
	kvm_write_sw_gc0_status(cop0, ST0_BEV | ST0_ERL);
	if (cpu_has_mips_r5 || cpu_has_mips_r6)
		kvm_change_sw_gc0_status(cop0, ST0_FR, read_gc0_status());
	/* IntCtl */
	kvm_write_sw_gc0_intctl(cop0, read_gc0_intctl() &
				(INTCTLF_IPFDC | INTCTLF_IPPCI | INTCTLF_IPTI));
	/* PRId */
	kvm_write_sw_gc0_prid(cop0, boot_cpu_data.processor_id);
	/* EBase */
	kvm_write_sw_gc0_ebase(cop0, (s32)0x80000000 | vcpu->vcpu_id);
	/* Config */
	kvm_save_gc0_config(cop0);
	/* architecturally writable (e.g. from guest) */
	kvm_change_sw_gc0_config(cop0, CONF_CM_CMASK,
				 _page_cachable_default >> _CACHE_SHIFT);
	/* architecturally read only, but maybe writable from root */
	kvm_change_sw_gc0_config(cop0, MIPS_CONF_MT, read_c0_config());
	if (cpu_guest_has_conf1) {
		kvm_set_sw_gc0_config(cop0, MIPS_CONF_M);
		/* Config1 */
		kvm_save_gc0_config1(cop0);
		/* architecturally read only, but maybe writable from root */
		kvm_clear_sw_gc0_config1(cop0, MIPS_CONF1_C2 |
					       MIPS_CONF1_MD |
					       MIPS_CONF1_PC |
					       MIPS_CONF1_WR |
					       MIPS_CONF1_CA |
					       MIPS_CONF1_FP);
	}
	if (cpu_guest_has_conf2) {
		kvm_set_sw_gc0_config1(cop0, MIPS_CONF_M);
		/* Config2 */
		kvm_save_gc0_config2(cop0);
	}
	if (cpu_guest_has_conf3) {
		kvm_set_sw_gc0_config2(cop0, MIPS_CONF_M);
		/* Config3 */
		kvm_save_gc0_config3(cop0);
		/* architecturally writable (e.g. from guest) */
		kvm_clear_sw_gc0_config3(cop0, MIPS_CONF3_ISA_OE);
		/* architecturally read only, but maybe writable from root */
		kvm_clear_sw_gc0_config3(cop0, MIPS_CONF3_MSA |
					       MIPS_CONF3_BPG |
					       MIPS_CONF3_ULRI |
					       MIPS_CONF3_DSP |
					       MIPS_CONF3_CTXTC |
					       MIPS_CONF3_ITL |
					       MIPS_CONF3_LPA |
					       MIPS_CONF3_VEIC |
					       MIPS_CONF3_VINT |
					       MIPS_CONF3_SP |
					       MIPS_CONF3_CDMM |
					       MIPS_CONF3_MT |
					       MIPS_CONF3_SM |
					       MIPS_CONF3_TL);
	}
	if (cpu_guest_has_conf4) {
		kvm_set_sw_gc0_config3(cop0, MIPS_CONF_M);
		/* Config4 */
		kvm_save_gc0_config4(cop0);
	}
	if (cpu_guest_has_conf5) {
		kvm_set_sw_gc0_config4(cop0, MIPS_CONF_M);
		/* Config5 */
		kvm_save_gc0_config5(cop0);
		/* architecturally writable (e.g. from guest) */
		kvm_clear_sw_gc0_config5(cop0, MIPS_CONF5_K |
					       MIPS_CONF5_CV |
					       MIPS_CONF5_MSAEN |
					       MIPS_CONF5_UFE |
					       MIPS_CONF5_FRE |
					       MIPS_CONF5_SBRI |
					       MIPS_CONF5_UFR);
		/* architecturally read only, but maybe writable from root */
		kvm_clear_sw_gc0_config5(cop0, MIPS_CONF5_MRP);
	}

	if (cpu_guest_has_contextconfig) {
		/* ContextConfig */
		kvm_write_sw_gc0_contextconfig(cop0, 0x007ffff0);
#ifdef CONFIG_64BIT
		/* XContextConfig */
		/* bits SEGBITS-13+3:4 set */
		kvm_write_sw_gc0_xcontextconfig(cop0,
					((1ull << (cpu_vmbits - 13)) - 1) << 4);
#endif
	}

	/* Implementation dependent, use the legacy layout */
	if (cpu_guest_has_segments) {
		/* SegCtl0, SegCtl1, SegCtl2 */
		kvm_write_sw_gc0_segctl0(cop0, 0x00200010);
		kvm_write_sw_gc0_segctl1(cop0, 0x00000002 |
				(_page_cachable_default >> _CACHE_SHIFT) <<
						(16 + MIPS_SEGCFG_C_SHIFT));
		kvm_write_sw_gc0_segctl2(cop0, 0x00380438);
	}

	/* reset HTW registers */
	if (cpu_guest_has_htw && (cpu_has_mips_r5 || cpu_has_mips_r6)) {
		/* PWField */
		kvm_write_sw_gc0_pwfield(cop0, 0x0c30c302);
		/* PWSize */
		kvm_write_sw_gc0_pwsize(cop0, 1 << MIPS_PWSIZE_PTW_SHIFT);
	}

	/* start with no pending virtual guest interrupts */
	if (cpu_has_guestctl2)
		cop0->reg[MIPS_CP0_GUESTCTL2][MIPS_CP0_GUESTCTL2_SEL] = 0;

	/* Put PC at reset vector */
	vcpu->arch.pc = CKSEG1ADDR(0x1fc00000);

	return 0;
}
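/*
 * Worked example (illustrative): for vcpu_id == 3 the reset EBase becomes
 * 0x80000003 (sign-extended on 64-bit hosts), so the low bits double as a
 * CPUNum the guest can read back, while CKSEG1ADDR(0x1fc00000) == 0xbfc00000
 * places the initial PC at the architectural uncached reset vector, matching
 * what a physical MIPS core would do out of reset.
 */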
static void kvm_vz_flush_shadow_all(struct kvm *kvm)
{
	if (cpu_has_guestid) {
		/* Flush GuestID for each VCPU individually */
		kvm_flush_remote_tlbs(kvm);
	} else {
		/*
		 * For each CPU there is a single GPA ASID used by all VCPUs in
		 * the VM, so it doesn't make sense for the VCPUs to handle
		 * invalidation of these ASIDs individually.
		 *
		 * Instead mark all CPUs as needing ASID invalidation in
		 * asid_flush_mask, and just use kvm_flush_remote_tlbs(kvm) to
		 * kick any running VCPUs so they check asid_flush_mask.
		 */
		cpumask_setall(&kvm->arch.asid_flush_mask);
		kvm_flush_remote_tlbs(kvm);
	}
}

static void kvm_vz_flush_shadow_memslot(struct kvm *kvm,
					const struct kvm_memory_slot *slot)
{
	kvm_vz_flush_shadow_all(kvm);
}

static void kvm_vz_vcpu_reenter(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	int cpu = smp_processor_id();
	int preserve_guest_tlb;

	preserve_guest_tlb = kvm_vz_check_requests(vcpu, cpu);

	if (preserve_guest_tlb)
		kvm_vz_vcpu_save_wired(vcpu);

	kvm_vz_vcpu_load_tlb(vcpu, cpu);

	if (preserve_guest_tlb)
		kvm_vz_vcpu_load_wired(vcpu);
}

static int kvm_vz_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	int cpu = smp_processor_id();
	int r;

	kvm_vz_acquire_htimer(vcpu);
	/* Check if we have any exceptions/interrupts pending */
	kvm_mips_deliver_interrupts(vcpu, read_gc0_cause());

	kvm_vz_check_requests(vcpu, cpu);
	kvm_vz_vcpu_load_tlb(vcpu, cpu);
	kvm_vz_vcpu_load_wired(vcpu);

	r = vcpu->arch.vcpu_run(run, vcpu);

	kvm_vz_vcpu_save_wired(vcpu);

	return r;
}

static struct kvm_mips_callbacks kvm_vz_callbacks = {
	.handle_cop_unusable = kvm_trap_vz_handle_cop_unusable,
	.handle_tlb_mod = kvm_trap_vz_handle_tlb_st_miss,
	.handle_tlb_ld_miss = kvm_trap_vz_handle_tlb_ld_miss,
	.handle_tlb_st_miss = kvm_trap_vz_handle_tlb_st_miss,
	.handle_addr_err_st = kvm_trap_vz_no_handler,
	.handle_addr_err_ld = kvm_trap_vz_no_handler,
	.handle_syscall = kvm_trap_vz_no_handler,
	.handle_res_inst = kvm_trap_vz_no_handler,
	.handle_break = kvm_trap_vz_no_handler,
	.handle_msa_disabled = kvm_trap_vz_handle_msa_disabled,
	.handle_guest_exit = kvm_trap_vz_handle_guest_exit,

	.hardware_enable = kvm_vz_hardware_enable,
	.hardware_disable = kvm_vz_hardware_disable,
	.check_extension = kvm_vz_check_extension,
	.vcpu_init = kvm_vz_vcpu_init,
	.vcpu_uninit = kvm_vz_vcpu_uninit,
	.vcpu_setup = kvm_vz_vcpu_setup,
	.flush_shadow_all = kvm_vz_flush_shadow_all,
	.flush_shadow_memslot = kvm_vz_flush_shadow_memslot,
	.gva_to_gpa = kvm_vz_gva_to_gpa_cb,
	.queue_timer_int = kvm_vz_queue_timer_int_cb,
	.dequeue_timer_int = kvm_vz_dequeue_timer_int_cb,
	.queue_io_int = kvm_vz_queue_io_int_cb,
	.dequeue_io_int = kvm_vz_dequeue_io_int_cb,
	.irq_deliver = kvm_vz_irq_deliver_cb,
	.irq_clear = kvm_vz_irq_clear_cb,
	.num_regs = kvm_vz_num_regs,
	.copy_reg_indices = kvm_vz_copy_reg_indices,
	.get_one_reg = kvm_vz_get_one_reg,
	.set_one_reg = kvm_vz_set_one_reg,
	.vcpu_load = kvm_vz_vcpu_load,
	.vcpu_put = kvm_vz_vcpu_put,
	.vcpu_run = kvm_vz_vcpu_run,
	.vcpu_reenter = kvm_vz_vcpu_reenter,
};

int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks)
{
	if (!cpu_has_vz)
		return -ENODEV;

	/*
	 * VZ requires at least 2 KScratch registers, so it should have been
	 * possible to allocate pgd_reg.
	 */
	if (WARN(pgd_reg == -1,
		 "pgd_reg not allocated even though cpu_has_vz\n"))
		return -ENODEV;

	pr_info("Starting KVM with MIPS VZ extensions\n");

	*install_callbacks = &kvm_vz_callbacks;
	return 0;
}
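/*
 * Consumption sketch (illustrative, not part of this file): the MIPS KVM
 * core is expected to call kvm_mips_emulation_init() once at initialisation
 * time and dispatch all VZ-specific behaviour through the returned table,
 * roughly:
 *
 *	static struct kvm_mips_callbacks *kvm_mips_callbacks;
 *
 *	ret = kvm_mips_emulation_init(&kvm_mips_callbacks);
 *	if (ret)
 *		return ret;
 *	...
 *	kvm_mips_callbacks->vcpu_setup(vcpu);
 *
 * so nothing above is reached except via this callback table.
 */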