1 /* 2 * This file is subject to the terms and conditions of the GNU General Public 3 * License. See the file "COPYING" in the main directory of this archive 4 * for more details. 5 * 6 * KVM/MIPS TLB handling, this file is part of the Linux host kernel so that 7 * TLB handlers run from KSEG0 8 * 9 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. 10 * Authors: Sanjay Lal <sanjayl@kymasys.com> 11 */ 12 13 #include <linux/sched.h> 14 #include <linux/smp.h> 15 #include <linux/mm.h> 16 #include <linux/delay.h> 17 #include <linux/export.h> 18 #include <linux/kvm_host.h> 19 #include <linux/srcu.h> 20 21 #include <asm/cpu.h> 22 #include <asm/bootinfo.h> 23 #include <asm/mmu_context.h> 24 #include <asm/pgtable.h> 25 #include <asm/cacheflush.h> 26 #include <asm/tlb.h> 27 #include <asm/tlbdebug.h> 28 29 #undef CONFIG_MIPS_MT 30 #include <asm/r4kcache.h> 31 #define CONFIG_MIPS_MT 32 33 #define KVM_GUEST_PC_TLB 0 34 #define KVM_GUEST_SP_TLB 1 35 36 #ifdef CONFIG_KVM_MIPS_VZ 37 unsigned long GUESTID_MASK; 38 EXPORT_SYMBOL_GPL(GUESTID_MASK); 39 unsigned long GUESTID_FIRST_VERSION; 40 EXPORT_SYMBOL_GPL(GUESTID_FIRST_VERSION); 41 unsigned long GUESTID_VERSION_MASK; 42 EXPORT_SYMBOL_GPL(GUESTID_VERSION_MASK); 43 44 static u32 kvm_mips_get_root_asid(struct kvm_vcpu *vcpu) 45 { 46 struct mm_struct *gpa_mm = &vcpu->kvm->arch.gpa_mm; 47 48 if (cpu_has_guestid) 49 return 0; 50 else 51 return cpu_asid(smp_processor_id(), gpa_mm); 52 } 53 #endif 54 55 static u32 kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu) 56 { 57 struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm; 58 int cpu = smp_processor_id(); 59 60 return cpu_asid(cpu, kern_mm); 61 } 62 63 static u32 kvm_mips_get_user_asid(struct kvm_vcpu *vcpu) 64 { 65 struct mm_struct *user_mm = &vcpu->arch.guest_user_mm; 66 int cpu = smp_processor_id(); 67 68 return cpu_asid(cpu, user_mm); 69 } 70 71 /* Structure defining an tlb entry data set. 
*/ 72 73 void kvm_mips_dump_host_tlbs(void) 74 { 75 unsigned long flags; 76 77 local_irq_save(flags); 78 79 kvm_info("HOST TLBs:\n"); 80 dump_tlb_regs(); 81 pr_info("\n"); 82 dump_tlb_all(); 83 84 local_irq_restore(flags); 85 } 86 EXPORT_SYMBOL_GPL(kvm_mips_dump_host_tlbs); 87 88 void kvm_mips_dump_guest_tlbs(struct kvm_vcpu *vcpu) 89 { 90 struct mips_coproc *cop0 = vcpu->arch.cop0; 91 struct kvm_mips_tlb tlb; 92 int i; 93 94 kvm_info("Guest TLBs:\n"); 95 kvm_info("Guest EntryHi: %#lx\n", kvm_read_c0_guest_entryhi(cop0)); 96 97 for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) { 98 tlb = vcpu->arch.guest_tlb[i]; 99 kvm_info("TLB%c%3d Hi 0x%08lx ", 100 (tlb.tlb_lo[0] | tlb.tlb_lo[1]) & ENTRYLO_V 101 ? ' ' : '*', 102 i, tlb.tlb_hi); 103 kvm_info("Lo0=0x%09llx %c%c attr %lx ", 104 (u64) mips3_tlbpfn_to_paddr(tlb.tlb_lo[0]), 105 (tlb.tlb_lo[0] & ENTRYLO_D) ? 'D' : ' ', 106 (tlb.tlb_lo[0] & ENTRYLO_G) ? 'G' : ' ', 107 (tlb.tlb_lo[0] & ENTRYLO_C) >> ENTRYLO_C_SHIFT); 108 kvm_info("Lo1=0x%09llx %c%c attr %lx sz=%lx\n", 109 (u64) mips3_tlbpfn_to_paddr(tlb.tlb_lo[1]), 110 (tlb.tlb_lo[1] & ENTRYLO_D) ? 'D' : ' ', 111 (tlb.tlb_lo[1] & ENTRYLO_G) ? 
'G' : ' ', 112 (tlb.tlb_lo[1] & ENTRYLO_C) >> ENTRYLO_C_SHIFT, 113 tlb.tlb_mask); 114 } 115 } 116 EXPORT_SYMBOL_GPL(kvm_mips_dump_guest_tlbs); 117 118 int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long entryhi) 119 { 120 int i; 121 int index = -1; 122 struct kvm_mips_tlb *tlb = vcpu->arch.guest_tlb; 123 124 for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) { 125 if (TLB_HI_VPN2_HIT(tlb[i], entryhi) && 126 TLB_HI_ASID_HIT(tlb[i], entryhi)) { 127 index = i; 128 break; 129 } 130 } 131 132 kvm_debug("%s: entryhi: %#lx, index: %d lo0: %#lx, lo1: %#lx\n", 133 __func__, entryhi, index, tlb[i].tlb_lo[0], tlb[i].tlb_lo[1]); 134 135 return index; 136 } 137 EXPORT_SYMBOL_GPL(kvm_mips_guest_tlb_lookup); 138 139 static int _kvm_mips_host_tlb_inv(unsigned long entryhi) 140 { 141 int idx; 142 143 write_c0_entryhi(entryhi); 144 mtc0_tlbw_hazard(); 145 146 tlb_probe(); 147 tlb_probe_hazard(); 148 idx = read_c0_index(); 149 150 if (idx >= current_cpu_data.tlbsize) 151 BUG(); 152 153 if (idx >= 0) { 154 write_c0_entryhi(UNIQUE_ENTRYHI(idx)); 155 write_c0_entrylo0(0); 156 write_c0_entrylo1(0); 157 mtc0_tlbw_hazard(); 158 159 tlb_write_indexed(); 160 tlbw_use_hazard(); 161 } 162 163 return idx; 164 } 165 166 int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long va, 167 bool user, bool kernel) 168 { 169 int idx_user, idx_kernel; 170 unsigned long flags, old_entryhi; 171 172 local_irq_save(flags); 173 174 old_entryhi = read_c0_entryhi(); 175 176 if (user) 177 idx_user = _kvm_mips_host_tlb_inv((va & VPN2_MASK) | 178 kvm_mips_get_user_asid(vcpu)); 179 if (kernel) 180 idx_kernel = _kvm_mips_host_tlb_inv((va & VPN2_MASK) | 181 kvm_mips_get_kernel_asid(vcpu)); 182 183 write_c0_entryhi(old_entryhi); 184 mtc0_tlbw_hazard(); 185 186 local_irq_restore(flags); 187 188 /* 189 * We don't want to get reserved instruction exceptions for missing tlb 190 * entries. 
191 */ 192 if (cpu_has_vtag_icache) 193 flush_icache_all(); 194 195 if (user && idx_user >= 0) 196 kvm_debug("%s: Invalidated guest user entryhi %#lx @ idx %d\n", 197 __func__, (va & VPN2_MASK) | 198 kvm_mips_get_user_asid(vcpu), idx_user); 199 if (kernel && idx_kernel >= 0) 200 kvm_debug("%s: Invalidated guest kernel entryhi %#lx @ idx %d\n", 201 __func__, (va & VPN2_MASK) | 202 kvm_mips_get_kernel_asid(vcpu), idx_kernel); 203 204 return 0; 205 } 206 EXPORT_SYMBOL_GPL(kvm_mips_host_tlb_inv); 207 208 #ifdef CONFIG_KVM_MIPS_VZ 209 210 /* GuestID management */ 211 212 /** 213 * clear_root_gid() - Set GuestCtl1.RID for normal root operation. 214 */ 215 static inline void clear_root_gid(void) 216 { 217 if (cpu_has_guestid) { 218 clear_c0_guestctl1(MIPS_GCTL1_RID); 219 mtc0_tlbw_hazard(); 220 } 221 } 222 223 /** 224 * set_root_gid_to_guest_gid() - Set GuestCtl1.RID to match GuestCtl1.ID. 225 * 226 * Sets the root GuestID to match the current guest GuestID, for TLB operation 227 * on the GPA->RPA mappings in the root TLB. 228 * 229 * The caller must be sure to disable HTW while the root GID is set, and 230 * possibly longer if TLB registers are modified. 
231 */ 232 static inline void set_root_gid_to_guest_gid(void) 233 { 234 unsigned int guestctl1; 235 236 if (cpu_has_guestid) { 237 back_to_back_c0_hazard(); 238 guestctl1 = read_c0_guestctl1(); 239 guestctl1 = (guestctl1 & ~MIPS_GCTL1_RID) | 240 ((guestctl1 & MIPS_GCTL1_ID) >> MIPS_GCTL1_ID_SHIFT) 241 << MIPS_GCTL1_RID_SHIFT; 242 write_c0_guestctl1(guestctl1); 243 mtc0_tlbw_hazard(); 244 } 245 } 246 247 int kvm_vz_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long va) 248 { 249 int idx; 250 unsigned long flags, old_entryhi; 251 252 local_irq_save(flags); 253 htw_stop(); 254 255 /* Set root GuestID for root probe and write of guest TLB entry */ 256 set_root_gid_to_guest_gid(); 257 258 old_entryhi = read_c0_entryhi(); 259 260 idx = _kvm_mips_host_tlb_inv((va & VPN2_MASK) | 261 kvm_mips_get_root_asid(vcpu)); 262 263 write_c0_entryhi(old_entryhi); 264 clear_root_gid(); 265 mtc0_tlbw_hazard(); 266 267 htw_start(); 268 local_irq_restore(flags); 269 270 /* 271 * We don't want to get reserved instruction exceptions for missing tlb 272 * entries. 273 */ 274 if (cpu_has_vtag_icache) 275 flush_icache_all(); 276 277 if (idx > 0) 278 kvm_debug("%s: Invalidated root entryhi %#lx @ idx %d\n", 279 __func__, (va & VPN2_MASK) | 280 kvm_mips_get_root_asid(vcpu), idx); 281 282 return 0; 283 } 284 EXPORT_SYMBOL_GPL(kvm_vz_host_tlb_inv); 285 286 /** 287 * kvm_vz_guest_tlb_lookup() - Lookup a guest VZ TLB mapping. 288 * @vcpu: KVM VCPU pointer. 289 * @gpa: Guest virtual address in a TLB mapped guest segment. 290 * @gpa: Ponter to output guest physical address it maps to. 291 * 292 * Converts a guest virtual address in a guest TLB mapped segment to a guest 293 * physical address, by probing the guest TLB. 294 * 295 * Returns: 0 if guest TLB mapping exists for @gva. *@gpa will have been 296 * written. 297 * -EFAULT if no guest TLB mapping exists for @gva. *@gpa may not 298 * have been written. 
 */
int kvm_vz_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long gva,
			    unsigned long *gpa)
{
	unsigned long o_entryhi, o_entrylo[2], o_pagemask;
	unsigned int o_index;
	unsigned long entrylo[2], pagemask, pagemaskbit, pa;
	unsigned long flags;
	int index;

	/* Probe the guest TLB for a mapping */
	local_irq_save(flags);
	/* Set root GuestID for root probe of guest TLB entry */
	htw_stop();
	set_root_gid_to_guest_gid();

	/* Save guest EntryHi/Index so they can be restored after the probe */
	o_entryhi = read_gc0_entryhi();
	o_index = read_gc0_index();

	/* Keep the low EntryHi bits (ASID), substitute the VPN bits of @gva */
	write_gc0_entryhi((o_entryhi & 0x3ff) | (gva & ~0xfffl));
	mtc0_tlbw_hazard();
	guest_tlb_probe();
	tlb_probe_hazard();

	index = read_gc0_index();
	if (index < 0) {
		/* No match, fail */
		write_gc0_entryhi(o_entryhi);
		write_gc0_index(o_index);

		clear_root_gid();
		htw_start();
		local_irq_restore(flags);
		return -EFAULT;
	}

	/* Match! read the TLB entry */
	o_entrylo[0] = read_gc0_entrylo0();
	o_entrylo[1] = read_gc0_entrylo1();
	o_pagemask = read_gc0_pagemask();

	mtc0_tlbr_hazard();
	guest_tlb_read();
	tlb_read_hazard();

	entrylo[0] = read_gc0_entrylo0();
	entrylo[1] = read_gc0_entrylo1();
	/*
	 * Invert PageMask and drop the low 13 bits: the result has 1s in
	 * exactly the VPN bits above the even/odd page select bit.
	 */
	pagemask = ~read_gc0_pagemask() & ~0x1fffl;

	/* Restore every guest register the probe/read clobbered */
	write_gc0_entryhi(o_entryhi);
	write_gc0_index(o_index);
	write_gc0_entrylo0(o_entrylo[0]);
	write_gc0_entrylo1(o_entrylo[1]);
	write_gc0_pagemask(o_pagemask);

	clear_root_gid();
	htw_start();
	local_irq_restore(flags);

	/* Select one of the EntryLo values and interpret the GPA */
	/* Lowest set bit of pagemask, shifted down: the even/odd select bit */
	pagemaskbit = (pagemask ^ (pagemask & (pagemask - 1))) >> 1;
	pa = entrylo[!!(gva & pagemaskbit)];

	/*
	 * TLB entry may have become invalid since TLB probe if physical FTLB
	 * entries are shared between threads (e.g. I6400).
	 */
	if (!(pa & ENTRYLO_V))
		return -EFAULT;

	/*
	 * Note, this doesn't take guest MIPS32 XPA into account, where PFN is
	 * split with XI/RI in the middle.
	 */
	/* PFN -> physical frame address, then add the in-page offset bits */
	pa = (pa << 6) & ~0xfffl;
	pa |= gva & ~(pagemask | pagemaskbit);

	*gpa = pa;
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_vz_guest_tlb_lookup);

/**
 * kvm_vz_local_flush_roottlb_all_guests() - Flush all root TLB entries for
 * guests.
 *
 * Invalidate all entries in root tlb which are GPA mappings.
 */
void kvm_vz_local_flush_roottlb_all_guests(void)
{
	unsigned long flags;
	unsigned long old_entryhi, old_pagemask, old_guestctl1;
	int entry;

	/* Only meaningful on cores with GuestID (RID tags guest entries) */
	if (WARN_ON(!cpu_has_guestid))
		return;

	local_irq_save(flags);
	htw_stop();

	/* TLBR may clobber EntryHi.ASID, PageMask, and GuestCtl1.RID */
	old_entryhi = read_c0_entryhi();
	old_pagemask = read_c0_pagemask();
	old_guestctl1 = read_c0_guestctl1();

	/*
	 * Invalidate guest entries in root TLB while leaving root entries
	 * intact when possible.
	 */
	for (entry = 0; entry < current_cpu_data.tlbsize; entry++) {
		write_c0_index(entry);
		mtc0_tlbw_hazard();
		tlb_read();
		tlb_read_hazard();

		/* Don't invalidate non-guest (RVA) mappings in the root TLB */
		if (!(read_c0_guestctl1() & MIPS_GCTL1_RID))
			continue;

		/* Make sure all entries differ. */
		write_c0_entryhi(UNIQUE_ENTRYHI(entry));
		write_c0_entrylo0(0);
		write_c0_entrylo1(0);
		write_c0_guestctl1(0);
		mtc0_tlbw_hazard();
		tlb_write_indexed();
	}

	/* Restore the registers TLBR clobbered */
	write_c0_entryhi(old_entryhi);
	write_c0_pagemask(old_pagemask);
	write_c0_guestctl1(old_guestctl1);
	tlbw_use_hazard();

	htw_start();
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(kvm_vz_local_flush_roottlb_all_guests);

/**
 * kvm_vz_local_flush_guesttlb_all() - Flush all guest TLB entries.
 *
 * Invalidate all entries in guest tlb irrespective of guestid.
 */
void kvm_vz_local_flush_guesttlb_all(void)
{
	unsigned long flags;
	unsigned long old_index;
	unsigned long old_entryhi;
	unsigned long old_entrylo[2];
	unsigned long old_pagemask;
	int entry;

	local_irq_save(flags);

	/* Preserve all clobbered guest registers */
	old_index = read_gc0_index();
	old_entryhi = read_gc0_entryhi();
	old_entrylo[0] = read_gc0_entrylo0();
	old_entrylo[1] = read_gc0_entrylo1();
	old_pagemask = read_gc0_pagemask();

	/* Invalidate guest entries in guest TLB */
	/* Zeroed EntryLo/PageMask are written to every indexed entry below */
	write_gc0_entrylo0(0);
	write_gc0_entrylo1(0);
	write_gc0_pagemask(0);
	for (entry = 0; entry < current_cpu_data.guest.tlbsize; entry++) {
		/* Make sure all entries differ. */
		write_gc0_index(entry);
		write_gc0_entryhi(UNIQUE_GUEST_ENTRYHI(entry));
		mtc0_tlbw_hazard();
		guest_tlb_write_indexed();
	}
	/* Restore the saved guest registers */
	write_gc0_index(old_index);
	write_gc0_entryhi(old_entryhi);
	write_gc0_entrylo0(old_entrylo[0]);
	write_gc0_entrylo1(old_entrylo[1]);
	write_gc0_pagemask(old_pagemask);
	tlbw_use_hazard();

	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(kvm_vz_local_flush_guesttlb_all);

/**
 * kvm_vz_save_guesttlb() - Save a range of guest TLB entries.
 * @buf:	Buffer to write TLB entries into.
 * @index:	Start index.
 * @count:	Number of entries to save.
 *
 * Save a range of guest TLB entries. The caller must ensure interrupts are
 * disabled.
 */
void kvm_vz_save_guesttlb(struct kvm_mips_tlb *buf, unsigned int index,
			  unsigned int count)
{
	unsigned int end = index + count;
	unsigned long old_entryhi, old_entrylo0, old_entrylo1, old_pagemask;
	unsigned int guestctl1 = 0;
	int old_index, i;

	/* Save registers we're about to clobber */
	old_index = read_gc0_index();
	old_entryhi = read_gc0_entryhi();
	old_entrylo0 = read_gc0_entrylo0();
	old_entrylo1 = read_gc0_entrylo1();
	old_pagemask = read_gc0_pagemask();

	/* Set root GuestID for root probe */
	htw_stop();
	set_root_gid_to_guest_gid();
	/* Remember the RID we expect; a mismatch after TLBR means wrong guest */
	if (cpu_has_guestid)
		guestctl1 = read_c0_guestctl1();

	/* Read each entry from guest TLB */
	for (i = index; i < end; ++i, ++buf) {
		write_gc0_index(i);

		mtc0_tlbr_hazard();
		guest_tlb_read();
		tlb_read_hazard();

		if (cpu_has_guestid &&
		    (read_c0_guestctl1() ^ guestctl1) & MIPS_GCTL1_RID) {
			/* Entry invalid or belongs to another guest */
			/* Store a unique, invalid placeholder instead */
			buf->tlb_hi = UNIQUE_GUEST_ENTRYHI(i);
			buf->tlb_lo[0] = 0;
			buf->tlb_lo[1] = 0;
			buf->tlb_mask = 0;
		} else {
			/* Entry belongs to the right guest */
			buf->tlb_hi = read_gc0_entryhi();
			buf->tlb_lo[0] = read_gc0_entrylo0();
			buf->tlb_lo[1] = read_gc0_entrylo1();
			buf->tlb_mask = read_gc0_pagemask();
		}
	}

	/* Clear root GuestID again */
	clear_root_gid();
	htw_start();

	/* Restore clobbered registers */
	write_gc0_index(old_index);
	write_gc0_entryhi(old_entryhi);
	write_gc0_entrylo0(old_entrylo0);
	write_gc0_entrylo1(old_entrylo1);
	write_gc0_pagemask(old_pagemask);

	tlbw_use_hazard();
}
EXPORT_SYMBOL_GPL(kvm_vz_save_guesttlb);

/**
 * kvm_vz_load_guesttlb() - Load a range of guest TLB entries.
 * @buf:	Buffer to read TLB entries from.
 * @index:	Start index.
 * @count:	Number of entries to load.
 *
 * Load a range of guest TLB entries.
 * The caller must ensure interrupts are
 * disabled.
 */
void kvm_vz_load_guesttlb(const struct kvm_mips_tlb *buf, unsigned int index,
			  unsigned int count)
{
	unsigned int end = index + count;
	unsigned long old_entryhi, old_entrylo0, old_entrylo1, old_pagemask;
	int old_index, i;

	/* Save registers we're about to clobber */
	old_index = read_gc0_index();
	old_entryhi = read_gc0_entryhi();
	old_entrylo0 = read_gc0_entrylo0();
	old_entrylo1 = read_gc0_entrylo1();
	old_pagemask = read_gc0_pagemask();

	/* Set root GuestID for root probe */
	htw_stop();
	set_root_gid_to_guest_gid();

	/* Write each entry to guest TLB */
	for (i = index; i < end; ++i, ++buf) {
		write_gc0_index(i);
		write_gc0_entryhi(buf->tlb_hi);
		write_gc0_entrylo0(buf->tlb_lo[0]);
		write_gc0_entrylo1(buf->tlb_lo[1]);
		write_gc0_pagemask(buf->tlb_mask);

		mtc0_tlbw_hazard();
		guest_tlb_write_indexed();
	}

	/* Clear root GuestID again */
	clear_root_gid();
	htw_start();

	/* Restore clobbered registers */
	write_gc0_index(old_index);
	write_gc0_entryhi(old_entryhi);
	write_gc0_entrylo0(old_entrylo0);
	write_gc0_entrylo1(old_entrylo1);
	write_gc0_pagemask(old_pagemask);

	tlbw_use_hazard();
}
EXPORT_SYMBOL_GPL(kvm_vz_load_guesttlb);

#endif

/**
 * kvm_mips_suspend_mm() - Suspend the active mm.
 * @cpu		The CPU we're running on.
 *
 * Suspend the active_mm, ready for a switch to a KVM guest virtual address
 * space. This is left active for the duration of guest context, including time
 * with interrupts enabled, so we need to be careful not to confuse e.g. cache
 * management IPIs.
 *
 * kvm_mips_resume_mm() should be called before context switching to a different
 * process so we don't need to worry about reference counting.
 *
 * This needs to be in static kernel code to avoid exporting init_mm.
620 */ 621 void kvm_mips_suspend_mm(int cpu) 622 { 623 cpumask_clear_cpu(cpu, mm_cpumask(current->active_mm)); 624 current->active_mm = &init_mm; 625 } 626 EXPORT_SYMBOL_GPL(kvm_mips_suspend_mm); 627 628 /** 629 * kvm_mips_resume_mm() - Resume the current process mm. 630 * @cpu The CPU we're running on. 631 * 632 * Resume the mm of the current process, after a switch back from a KVM guest 633 * virtual address space (see kvm_mips_suspend_mm()). 634 */ 635 void kvm_mips_resume_mm(int cpu) 636 { 637 cpumask_set_cpu(cpu, mm_cpumask(current->mm)); 638 current->active_mm = current->mm; 639 } 640 EXPORT_SYMBOL_GPL(kvm_mips_resume_mm); 641