/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS TLB handling, this file is part of the Linux host kernel so that
 * TLB handlers run from KSEG0
 *
 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */

#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/kvm_host.h>
#include <linux/srcu.h>

#include <asm/cpu.h>
#include <asm/bootinfo.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/tlb.h>

#undef CONFIG_MIPS_MT
#include <asm/r4kcache.h>
#define CONFIG_MIPS_MT

#define KVM_GUEST_PC_TLB	0
#define KVM_GUEST_SP_TLB	1

#define PRIx64 "llx"

atomic_t kvm_mips_instance;
EXPORT_SYMBOL_GPL(kvm_mips_instance);

/* These function pointers are initialized once the KVM module is loaded */
kvm_pfn_t (*kvm_mips_gfn_to_pfn)(struct kvm *kvm, gfn_t gfn);
EXPORT_SYMBOL_GPL(kvm_mips_gfn_to_pfn);

void (*kvm_mips_release_pfn_clean)(kvm_pfn_t pfn);
EXPORT_SYMBOL_GPL(kvm_mips_release_pfn_clean);

bool (*kvm_mips_is_error_pfn)(kvm_pfn_t pfn);
EXPORT_SYMBOL_GPL(kvm_mips_is_error_pfn);

uint32_t kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.guest_kernel_asid[smp_processor_id()] & ASID_MASK;
}

uint32_t kvm_mips_get_user_asid(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.guest_user_asid[smp_processor_id()] & ASID_MASK;
}

inline uint32_t kvm_mips_get_commpage_asid(struct kvm_vcpu *vcpu)
{
	return vcpu->kvm->arch.commpage_tlb;
}
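/*
 * Note: the helpers above index per-CPU ASID arrays via smp_processor_id(),
 * so they are only meaningful while preemption is disabled. The callers in
 * this file take care of that (preempt_disable() or local_irq_save()); a
 * minimal sketch of the expected calling pattern:
 *
 *	preempt_disable();
 *	entryhi = (vaddr & VPN2_MASK) | kvm_mips_get_kernel_asid(vcpu);
 *	... program the TLB ...
 *	preempt_enable();
 */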
/* Structure defining a TLB entry data set */

void kvm_mips_dump_host_tlbs(void)
{
	unsigned long old_entryhi;
	unsigned long old_pagemask;
	struct kvm_mips_tlb tlb;
	unsigned long flags;
	int i;

	local_irq_save(flags);

	old_entryhi = read_c0_entryhi();
	old_pagemask = read_c0_pagemask();

	kvm_info("HOST TLBs:\n");
	kvm_info("ASID: %#lx\n", read_c0_entryhi() & ASID_MASK);

	for (i = 0; i < current_cpu_data.tlbsize; i++) {
		write_c0_index(i);
		mtc0_tlbw_hazard();

		tlb_read();
		tlbw_use_hazard();

		tlb.tlb_hi = read_c0_entryhi();
		tlb.tlb_lo0 = read_c0_entrylo0();
		tlb.tlb_lo1 = read_c0_entrylo1();
		tlb.tlb_mask = read_c0_pagemask();

		kvm_info("TLB%c%3d Hi 0x%08lx ",
			 (tlb.tlb_lo0 | tlb.tlb_lo1) & MIPS3_PG_V ? ' ' : '*',
			 i, tlb.tlb_hi);
		kvm_info("Lo0=0x%09" PRIx64 " %c%c attr %lx ",
			 (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo0),
			 (tlb.tlb_lo0 & MIPS3_PG_D) ? 'D' : ' ',
			 (tlb.tlb_lo0 & MIPS3_PG_G) ? 'G' : ' ',
			 (tlb.tlb_lo0 >> 3) & 7);
		kvm_info("Lo1=0x%09" PRIx64 " %c%c attr %lx sz=%lx\n",
			 (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo1),
			 (tlb.tlb_lo1 & MIPS3_PG_D) ? 'D' : ' ',
			 (tlb.tlb_lo1 & MIPS3_PG_G) ? 'G' : ' ',
			 (tlb.tlb_lo1 >> 3) & 7, tlb.tlb_mask);
	}
	write_c0_entryhi(old_entryhi);
	write_c0_pagemask(old_pagemask);
	mtc0_tlbw_hazard();
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(kvm_mips_dump_host_tlbs);

void kvm_mips_dump_guest_tlbs(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_mips_tlb tlb;
	int i;

	kvm_info("Guest TLBs:\n");
	kvm_info("Guest EntryHi: %#lx\n", kvm_read_c0_guest_entryhi(cop0));

	for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
		tlb = vcpu->arch.guest_tlb[i];
		kvm_info("TLB%c%3d Hi 0x%08lx ",
			 (tlb.tlb_lo0 | tlb.tlb_lo1) & MIPS3_PG_V ? ' ' : '*',
			 i, tlb.tlb_hi);
		kvm_info("Lo0=0x%09" PRIx64 " %c%c attr %lx ",
			 (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo0),
			 (tlb.tlb_lo0 & MIPS3_PG_D) ? 'D' : ' ',
			 (tlb.tlb_lo0 & MIPS3_PG_G) ? 'G' : ' ',
			 (tlb.tlb_lo0 >> 3) & 7);
		kvm_info("Lo1=0x%09" PRIx64 " %c%c attr %lx sz=%lx\n",
			 (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo1),
			 (tlb.tlb_lo1 & MIPS3_PG_D) ? 'D' : ' ',
			 (tlb.tlb_lo1 & MIPS3_PG_G) ? 'G' : ' ',
			 (tlb.tlb_lo1 >> 3) & 7, tlb.tlb_mask);
	}
}
EXPORT_SYMBOL_GPL(kvm_mips_dump_guest_tlbs);

static int kvm_mips_map_page(struct kvm *kvm, gfn_t gfn)
{
	int srcu_idx, err = 0;
	kvm_pfn_t pfn;

	if (kvm->arch.guest_pmap[gfn] != KVM_INVALID_PAGE)
		return 0;

	srcu_idx = srcu_read_lock(&kvm->srcu);
	pfn = kvm_mips_gfn_to_pfn(kvm, gfn);

	if (kvm_mips_is_error_pfn(pfn)) {
		kvm_err("Couldn't get pfn for gfn %#" PRIx64 "!\n", gfn);
		err = -EFAULT;
		goto out;
	}

	kvm->arch.guest_pmap[gfn] = pfn;
out:
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	return err;
}

/* Translate guest KSEG0 addresses to Host PA */
unsigned long kvm_mips_translate_guest_kseg0_to_hpa(struct kvm_vcpu *vcpu,
						    unsigned long gva)
{
	gfn_t gfn;
	uint32_t offset = gva & ~PAGE_MASK;
	struct kvm *kvm = vcpu->kvm;

	if (KVM_GUEST_KSEGX(gva) != KVM_GUEST_KSEG0) {
		kvm_err("%s/%p: Invalid gva: %#lx\n", __func__,
			__builtin_return_address(0), gva);
		return KVM_INVALID_PAGE;
	}

	gfn = (KVM_GUEST_CPHYSADDR(gva) >> PAGE_SHIFT);

	if (gfn >= kvm->arch.guest_pmap_npages) {
		kvm_err("%s: Invalid gfn: %#llx, GVA: %#lx\n", __func__, gfn,
			gva);
		return KVM_INVALID_PAGE;
	}

	if (kvm_mips_map_page(vcpu->kvm, gfn) < 0)
		return KVM_INVALID_ADDR;

	return (kvm->arch.guest_pmap[gfn] << PAGE_SHIFT) + offset;
}
EXPORT_SYMBOL_GPL(kvm_mips_translate_guest_kseg0_to_hpa);
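/*
 * Worked example for the translation above (a sketch; it assumes the usual
 * trap-and-emulate layout where KVM_GUEST_KSEG0 is 0x40000000 and 4K pages):
 *
 *	gva    = 0x40123abc			guest KSEG0 address
 *	gpa    = KVM_GUEST_CPHYSADDR(gva)	= 0x00123abc
 *	gfn    = gpa >> PAGE_SHIFT		= 0x123
 *	offset = gva & ~PAGE_MASK		= 0xabc
 *	hpa    = (guest_pmap[gfn] << PAGE_SHIFT) + offset
 */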
/* XXXKYMA: Must be called with interrupts disabled */
/* set flush_dcache_mask == 0 if no dcache flush required */
int kvm_mips_host_tlb_write(struct kvm_vcpu *vcpu, unsigned long entryhi,
			    unsigned long entrylo0, unsigned long entrylo1,
			    int flush_dcache_mask)
{
	unsigned long flags;
	unsigned long old_entryhi;
	int idx;

	local_irq_save(flags);

	old_entryhi = read_c0_entryhi();
	write_c0_entryhi(entryhi);
	mtc0_tlbw_hazard();

	tlb_probe();
	tlb_probe_hazard();
	idx = read_c0_index();

	/* Valid probe results are -1 (miss) through tlbsize - 1 */
	if (idx >= current_cpu_data.tlbsize) {
		kvm_err("%s: Invalid Index: %d\n", __func__, idx);
		kvm_mips_dump_host_tlbs();
		local_irq_restore(flags);
		return -1;
	}

	write_c0_entrylo0(entrylo0);
	write_c0_entrylo1(entrylo1);
	mtc0_tlbw_hazard();

	if (idx < 0)
		tlb_write_random();
	else
		tlb_write_indexed();
	tlbw_use_hazard();

	kvm_debug("@ %#lx idx: %2d [entryhi(R): %#lx] entrylo0(R): 0x%08lx, entrylo1(R): 0x%08lx\n",
		  vcpu->arch.pc, idx, read_c0_entryhi(),
		  read_c0_entrylo0(), read_c0_entrylo1());

	/* Flush D-cache */
	if (flush_dcache_mask) {
		if (entrylo0 & MIPS3_PG_V) {
			++vcpu->stat.flush_dcache_exits;
			flush_data_cache_page((entryhi & VPN2_MASK) &
					      ~flush_dcache_mask);
		}
		if (entrylo1 & MIPS3_PG_V) {
			++vcpu->stat.flush_dcache_exits;
			flush_data_cache_page(((entryhi & VPN2_MASK) &
					       ~flush_dcache_mask) |
					      (0x1 << PAGE_SHIFT));
		}
	}

	/* Restore old ASID */
	write_c0_entryhi(old_entryhi);
	mtc0_tlbw_hazard();
	tlbw_use_hazard();
	local_irq_restore(flags);
	return 0;
}

/* XXXKYMA: Must be called with interrupts disabled */
int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr,
				    struct kvm_vcpu *vcpu)
{
	gfn_t gfn;
	kvm_pfn_t pfn0, pfn1;
	unsigned long vaddr = 0;
	unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0;
	int even;
	struct kvm *kvm = vcpu->kvm;
	const int flush_dcache_mask = 0;
	int ret;

	if (KVM_GUEST_KSEGX(badvaddr) != KVM_GUEST_KSEG0) {
		kvm_err("%s: Invalid BadVaddr: %#lx\n", __func__, badvaddr);
		kvm_mips_dump_host_tlbs();
		return -1;
	}

	gfn = (KVM_GUEST_CPHYSADDR(badvaddr) >> PAGE_SHIFT);
	if (gfn >= kvm->arch.guest_pmap_npages) {
		kvm_err("%s: Invalid gfn: %#llx, BadVaddr: %#lx\n", __func__,
			gfn, badvaddr);
		kvm_mips_dump_host_tlbs();
		return -1;
	}
	even = !(gfn & 0x1);
	vaddr = badvaddr & (PAGE_MASK << 1);

	if (kvm_mips_map_page(vcpu->kvm, gfn) < 0)
		return -1;

	if (kvm_mips_map_page(vcpu->kvm, gfn ^ 0x1) < 0)
		return -1;

	if (even) {
		pfn0 = kvm->arch.guest_pmap[gfn];
		pfn1 = kvm->arch.guest_pmap[gfn ^ 0x1];
	} else {
		pfn0 = kvm->arch.guest_pmap[gfn ^ 0x1];
		pfn1 = kvm->arch.guest_pmap[gfn];
	}

	entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) |
		   (1 << 2) | (0x1 << 1);
	entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | (0x3 << 3) |
		   (1 << 2) | (0x1 << 1);

	preempt_disable();
	entryhi = (vaddr | kvm_mips_get_kernel_asid(vcpu));
	ret = kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1,
				      flush_dcache_mask);
	preempt_enable();

	return ret;
}
EXPORT_SYMBOL_GPL(kvm_mips_handle_kseg0_tlb_fault);
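/*
 * The EntryLo values built above hard-code the standard MIPS32/MIPS64
 * EntryLo field layout (a decoding sketch, not a definitive reference):
 *
 *	(0x3 << 3)	C field (bits 5:3) = 3, cacheable noncoherent
 *	(1 << 2)	D bit, page is writable (dirty)
 *	(0x1 << 1)	V bit, entry is valid
 *
 * i.e. the same bits tested elsewhere via MIPS3_PG_D (0x4) and
 * MIPS3_PG_V (0x2); bit 0 (MIPS3_PG_G) is left clear so the entry only
 * matches the ASID written into EntryHi.
 */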
int kvm_mips_handle_commpage_tlb_fault(unsigned long badvaddr,
				       struct kvm_vcpu *vcpu)
{
	kvm_pfn_t pfn0, pfn1;
	unsigned long flags, old_entryhi = 0, vaddr = 0;
	unsigned long entrylo0 = 0, entrylo1 = 0;

	pfn0 = CPHYSADDR(vcpu->arch.kseg0_commpage) >> PAGE_SHIFT;
	pfn1 = 0;
	entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) |
		   (1 << 2) | (0x1 << 1);
	entrylo1 = 0;

	local_irq_save(flags);

	old_entryhi = read_c0_entryhi();
	vaddr = badvaddr & (PAGE_MASK << 1);
	write_c0_entryhi(vaddr | kvm_mips_get_kernel_asid(vcpu));
	mtc0_tlbw_hazard();
	write_c0_entrylo0(entrylo0);
	mtc0_tlbw_hazard();
	write_c0_entrylo1(entrylo1);
	mtc0_tlbw_hazard();
	/* Despite the helper's name, commpage_tlb is a reserved TLB index */
	write_c0_index(kvm_mips_get_commpage_asid(vcpu));
	mtc0_tlbw_hazard();
	tlb_write_indexed();
	mtc0_tlbw_hazard();
	tlbw_use_hazard();

	kvm_debug("@ %#lx idx: %2d [entryhi(R): %#lx] entrylo0(R): 0x%08lx, entrylo1(R): 0x%08lx\n",
		  vcpu->arch.pc, read_c0_index(), read_c0_entryhi(),
		  read_c0_entrylo0(), read_c0_entrylo1());

	/* Restore old ASID */
	write_c0_entryhi(old_entryhi);
	mtc0_tlbw_hazard();
	tlbw_use_hazard();
	local_irq_restore(flags);

	return 0;
}
EXPORT_SYMBOL_GPL(kvm_mips_handle_commpage_tlb_fault);

int kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
					 struct kvm_mips_tlb *tlb,
					 unsigned long *hpa0,
					 unsigned long *hpa1)
{
	unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0;
	struct kvm *kvm = vcpu->kvm;
	kvm_pfn_t pfn0, pfn1;
	int ret;

	if ((tlb->tlb_hi & VPN2_MASK) == 0) {
		pfn0 = 0;
		pfn1 = 0;
	} else {
		if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo0)
				      >> PAGE_SHIFT) < 0)
			return -1;

		if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo1)
				      >> PAGE_SHIFT) < 0)
			return -1;

		pfn0 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo0)
					    >> PAGE_SHIFT];
		pfn1 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo1)
					    >> PAGE_SHIFT];
	}

	if (hpa0)
		*hpa0 = pfn0 << PAGE_SHIFT;

	if (hpa1)
		*hpa1 = pfn1 << PAGE_SHIFT;

	/* Get attributes from the Guest TLB */
	entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) |
		   (tlb->tlb_lo0 & MIPS3_PG_D) | (tlb->tlb_lo0 & MIPS3_PG_V);
	entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | (0x3 << 3) |
		   (tlb->tlb_lo1 & MIPS3_PG_D) | (tlb->tlb_lo1 & MIPS3_PG_V);

	kvm_debug("@ %#lx tlb_lo0: 0x%08lx tlb_lo1: 0x%08lx\n", vcpu->arch.pc,
		  tlb->tlb_lo0, tlb->tlb_lo1);

	preempt_disable();
	entryhi = (tlb->tlb_hi & VPN2_MASK) | (KVM_GUEST_KERNEL_MODE(vcpu) ?
					       kvm_mips_get_kernel_asid(vcpu) :
					       kvm_mips_get_user_asid(vcpu));
	ret = kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1,
				      tlb->tlb_mask);
	preempt_enable();

	return ret;
}
EXPORT_SYMBOL_GPL(kvm_mips_handle_mapped_seg_tlb_fault);

int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long entryhi)
{
	int i;
	int index = -1;
	struct kvm_mips_tlb *tlb = vcpu->arch.guest_tlb;

	for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
		if (TLB_HI_VPN2_HIT(tlb[i], entryhi) &&
		    TLB_HI_ASID_HIT(tlb[i], entryhi)) {
			index = i;
			break;
		}
	}

	/*
	 * Only dereference the guest TLB array on a hit; on a miss,
	 * i == KVM_MIPS_GUEST_TLB_SIZE and tlb[i] would read out of bounds.
	 */
	if (index >= 0)
		kvm_debug("%s: entryhi: %#lx, index: %d lo0: %#lx, lo1: %#lx\n",
			  __func__, entryhi, index,
			  tlb[index].tlb_lo0, tlb[index].tlb_lo1);
	else
		kvm_debug("%s: entryhi: %#lx, index: %d\n",
			  __func__, entryhi, index);

	return index;
}
EXPORT_SYMBOL_GPL(kvm_mips_guest_tlb_lookup);
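/*
 * Typical use of the lookup above (a sketch mirroring kvm_get_inst() at the
 * bottom of this file): build the probe key from the faulting address and
 * the guest's current ASID, then index into the shadowed guest TLB:
 *
 *	vpn2  = badvaddr & VPN2_MASK;
 *	asid  = kvm_read_c0_guest_entryhi(cop0) & ASID_MASK;
 *	index = kvm_mips_guest_tlb_lookup(vcpu, vpn2 | asid);
 *	if (index >= 0)
 *		tlb = &vcpu->arch.guest_tlb[index];
 */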
int kvm_mips_host_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long vaddr)
{
	unsigned long old_entryhi, flags;
	int idx;

	local_irq_save(flags);

	old_entryhi = read_c0_entryhi();

	if (KVM_GUEST_KERNEL_MODE(vcpu))
		write_c0_entryhi((vaddr & VPN2_MASK) |
				 kvm_mips_get_kernel_asid(vcpu));
	else
		write_c0_entryhi((vaddr & VPN2_MASK) |
				 kvm_mips_get_user_asid(vcpu));

	mtc0_tlbw_hazard();

	tlb_probe();
	tlb_probe_hazard();
	idx = read_c0_index();

	/* Restore old ASID */
	write_c0_entryhi(old_entryhi);
	mtc0_tlbw_hazard();
	tlbw_use_hazard();

	local_irq_restore(flags);

	kvm_debug("Host TLB lookup, %#lx, idx: %2d\n", vaddr, idx);

	return idx;
}
EXPORT_SYMBOL_GPL(kvm_mips_host_tlb_lookup);

int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long va)
{
	int idx;
	unsigned long flags, old_entryhi;

	local_irq_save(flags);

	old_entryhi = read_c0_entryhi();

	write_c0_entryhi((va & VPN2_MASK) | kvm_mips_get_user_asid(vcpu));
	mtc0_tlbw_hazard();

	tlb_probe();
	tlb_probe_hazard();
	idx = read_c0_index();

	if (idx >= current_cpu_data.tlbsize)
		BUG();

	/* Index 0 is a valid entry, so invalidate on any non-negative index */
	if (idx >= 0) {
		write_c0_entryhi(UNIQUE_ENTRYHI(idx));
		mtc0_tlbw_hazard();

		write_c0_entrylo0(0);
		mtc0_tlbw_hazard();

		write_c0_entrylo1(0);
		mtc0_tlbw_hazard();

		tlb_write_indexed();
		mtc0_tlbw_hazard();
	}

	write_c0_entryhi(old_entryhi);
	mtc0_tlbw_hazard();
	tlbw_use_hazard();

	local_irq_restore(flags);

	if (idx >= 0)
		kvm_debug("%s: Invalidated entryhi %#lx @ idx %d\n", __func__,
			  (va & VPN2_MASK) | kvm_mips_get_user_asid(vcpu), idx);

	return 0;
}
EXPORT_SYMBOL_GPL(kvm_mips_host_tlb_inv);

void kvm_mips_flush_host_tlb(int skip_kseg0)
{
	unsigned long flags;
	unsigned long old_entryhi, entryhi;
	unsigned long old_pagemask;
	int entry = 0;
	int maxentry = current_cpu_data.tlbsize;

	local_irq_save(flags);

	old_entryhi = read_c0_entryhi();
	old_pagemask = read_c0_pagemask();

	/* Blast 'em all away. */
	for (entry = 0; entry < maxentry; entry++) {
		write_c0_index(entry);
		mtc0_tlbw_hazard();

		if (skip_kseg0) {
			tlb_read();
			tlbw_use_hazard();

			entryhi = read_c0_entryhi();

			/* Don't blow away guest kernel entries */
			if (KVM_GUEST_KSEGX(entryhi) == KVM_GUEST_KSEG0)
				continue;
		}

		/* Make sure all entries differ. */
		write_c0_entryhi(UNIQUE_ENTRYHI(entry));
		mtc0_tlbw_hazard();
		write_c0_entrylo0(0);
		mtc0_tlbw_hazard();
		write_c0_entrylo1(0);
		mtc0_tlbw_hazard();

		tlb_write_indexed();
		mtc0_tlbw_hazard();
	}

	tlbw_use_hazard();

	write_c0_entryhi(old_entryhi);
	write_c0_pagemask(old_pagemask);
	mtc0_tlbw_hazard();
	tlbw_use_hazard();

	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(kvm_mips_flush_host_tlb);

void kvm_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu,
			     struct kvm_vcpu *vcpu)
{
	unsigned long asid = asid_cache(cpu);

	asid += ASID_INC;
	if (!(asid & ASID_MASK)) {
		if (cpu_has_vtag_icache)
			flush_icache_all();

		kvm_local_flush_tlb_all();	/* start new asid cycle */

		if (!asid)	/* fix version if needed */
			asid = ASID_FIRST_VERSION;
	}

	cpu_context(cpu, mm) = asid_cache(cpu) = asid;
}

void kvm_local_flush_tlb_all(void)
{
	unsigned long flags;
	unsigned long old_ctx;
	int entry = 0;

	local_irq_save(flags);
	/* Save old context and create impossible VPN2 value */
	old_ctx = read_c0_entryhi();
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);

	/* Blast 'em all away. */
	while (entry < current_cpu_data.tlbsize) {
		/* Make sure all entries differ. */
		write_c0_entryhi(UNIQUE_ENTRYHI(entry));
		write_c0_index(entry);
		mtc0_tlbw_hazard();
		tlb_write_indexed();
		entry++;
	}
	tlbw_use_hazard();
	write_c0_entryhi(old_ctx);
	mtc0_tlbw_hazard();

	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(kvm_local_flush_tlb_all);
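/*
 * ASID allocation sketch for kvm_get_new_mmu_context() above: asid_cache()
 * holds a generation counter in the bits above ASID_MASK (the exact widths
 * are an assumption here; e.g. with an 8-bit hardware ASID, ASID_MASK is
 * 0xff and the version lives in the upper bits):
 *
 *	cache = 0x00000103	version 1, ASID 0x03
 *	+ 1   = 0x00000104	same version, next ASID
 *	...
 *	0x000001ff + 1 = 0x00000200
 *		-> low bits wrapped to 0: flush the TLB and start a new
 *		   version, so stale version-1 mappings can never match
 *		   a recycled ASID.
 */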
/**
 * kvm_mips_migrate_count() - Migrate timer.
 * @vcpu:	Virtual CPU.
 *
 * Migrate CP0_Count hrtimer to the current CPU by cancelling and restarting it
 * if it was running prior to being cancelled.
 *
 * Must be called when the VCPU is migrated to a different CPU to ensure that
 * timer expiry during guest execution interrupts the guest and causes the
 * interrupt to be delivered in a timely manner.
 */
static void kvm_mips_migrate_count(struct kvm_vcpu *vcpu)
{
	if (hrtimer_cancel(&vcpu->arch.comparecount_timer))
		hrtimer_restart(&vcpu->arch.comparecount_timer);
}

/* Restore ASID once we are scheduled back after preemption */
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	unsigned long flags;
	int newasid = 0;

	kvm_debug("%s: vcpu %p, cpu: %d\n", __func__, vcpu, cpu);

	/* Allocate new kernel and user ASIDs if needed */

	local_irq_save(flags);

	if ((vcpu->arch.guest_kernel_asid[cpu] ^ asid_cache(cpu)) &
	    ASID_VERSION_MASK) {
		kvm_get_new_mmu_context(&vcpu->arch.guest_kernel_mm, cpu, vcpu);
		vcpu->arch.guest_kernel_asid[cpu] =
			vcpu->arch.guest_kernel_mm.context.asid[cpu];
		kvm_get_new_mmu_context(&vcpu->arch.guest_user_mm, cpu, vcpu);
		vcpu->arch.guest_user_asid[cpu] =
			vcpu->arch.guest_user_mm.context.asid[cpu];
		newasid++;

		kvm_debug("[%d]: cpu_context: %#lx\n", cpu,
			  cpu_context(cpu, current->mm));
		kvm_debug("[%d]: Allocated new ASID for Guest Kernel: %#x\n",
			  cpu, vcpu->arch.guest_kernel_asid[cpu]);
		kvm_debug("[%d]: Allocated new ASID for Guest User: %#x\n",
			  cpu, vcpu->arch.guest_user_asid[cpu]);
	}

	if (vcpu->arch.last_sched_cpu != cpu) {
		kvm_debug("[%d->%d]KVM VCPU[%d] switch\n",
			  vcpu->arch.last_sched_cpu, cpu, vcpu->vcpu_id);
		/*
		 * Migrate the timer interrupt to the current CPU so that it
		 * always interrupts the guest and synchronously triggers a
		 * guest timer interrupt.
		 */
		kvm_mips_migrate_count(vcpu);
	}

	if (!newasid) {
		/*
		 * If we preempted while the guest was executing, then reload
		 * the pre-empted ASID.
		 */
		if (current->flags & PF_VCPU) {
			write_c0_entryhi(vcpu->arch.preempt_entryhi &
					 ASID_MASK);
			ehb();
		}
	} else {
		/* New ASIDs were allocated for the VM */

		/*
		 * Were we in guest context? If so, the pre-empted ASID is no
		 * longer valid; we need to set it to what it should be based
		 * on the mode of the Guest (Kernel/User).
		 */
		if (current->flags & PF_VCPU) {
			if (KVM_GUEST_KERNEL_MODE(vcpu))
				write_c0_entryhi(vcpu->arch.
						 guest_kernel_asid[cpu] &
						 ASID_MASK);
			else
				write_c0_entryhi(vcpu->arch.
						 guest_user_asid[cpu] &
						 ASID_MASK);
			ehb();
		}
	}

	/* Restore guest state to registers */
	kvm_mips_callbacks->vcpu_set_regs(vcpu);

	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(kvm_arch_vcpu_load);
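/*
 * Note on pairing: kvm_arch_vcpu_load() above and kvm_arch_vcpu_put() below
 * bracket every scheduling of the VCPU task. load reallocates ASIDs whose
 * version has expired and restores the guest EntryHi that was live at
 * preemption; put snapshots EntryHi into preempt_entryhi and hands the CPU
 * back to the host's MMU context.
 */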
/* ASID can change if another task is scheduled during preemption */
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	unsigned long flags;
	uint32_t cpu;

	local_irq_save(flags);

	cpu = smp_processor_id();

	vcpu->arch.preempt_entryhi = read_c0_entryhi();
	vcpu->arch.last_sched_cpu = cpu;

	/* Save guest state in registers */
	kvm_mips_callbacks->vcpu_get_regs(vcpu);

	if (((cpu_context(cpu, current->mm) ^ asid_cache(cpu)) &
	     ASID_VERSION_MASK)) {
		kvm_debug("%s: Dropping MMU Context: %#lx\n", __func__,
			  cpu_context(cpu, current->mm));
		drop_mmu_context(current->mm, cpu);
	}
	write_c0_entryhi(cpu_asid(cpu, current->mm));
	ehb();

	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(kvm_arch_vcpu_put);

uint32_t kvm_get_inst(uint32_t *opc, struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	unsigned long paddr, flags, vpn2, asid;
	uint32_t inst;
	int index;

	if (KVM_GUEST_KSEGX((unsigned long) opc) < KVM_GUEST_KSEG0 ||
	    KVM_GUEST_KSEGX((unsigned long) opc) == KVM_GUEST_KSEG23) {
		local_irq_save(flags);
		index = kvm_mips_host_tlb_lookup(vcpu, (unsigned long) opc);
		if (index >= 0) {
			inst = *(opc);
		} else {
			vpn2 = (unsigned long) opc & VPN2_MASK;
			asid = kvm_read_c0_guest_entryhi(cop0) & ASID_MASK;
			index = kvm_mips_guest_tlb_lookup(vcpu, vpn2 | asid);
			if (index < 0) {
				kvm_err("%s: get_user_failed for %p, vcpu: %p, ASID: %#lx\n",
					__func__, opc, vcpu, read_c0_entryhi());
				kvm_mips_dump_host_tlbs();
				local_irq_restore(flags);
				return KVM_INVALID_INST;
			}
			kvm_mips_handle_mapped_seg_tlb_fault(vcpu,
							     &vcpu->arch.
							     guest_tlb[index],
							     NULL, NULL);
			inst = *(opc);
		}
		local_irq_restore(flags);
	} else if (KVM_GUEST_KSEGX((unsigned long) opc) == KVM_GUEST_KSEG0) {
		paddr = kvm_mips_translate_guest_kseg0_to_hpa(vcpu,
							(unsigned long) opc);
		inst = *(uint32_t *) CKSEG0ADDR(paddr);
	} else {
		kvm_err("%s: illegal address: %p\n", __func__, opc);
		return KVM_INVALID_INST;
	}

	return inst;
}
EXPORT_SYMBOL_GPL(kvm_get_inst);
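/*
 * Example use of kvm_get_inst() from an exit handler (a sketch; the exact
 * caller shape lives in the emulation code, not in this file): fetch the
 * instruction that trapped, bailing out if the fetch itself faulted.
 *
 *	uint32_t inst = kvm_get_inst(opc, vcpu);
 *
 *	if (inst == KVM_INVALID_INST)
 *		return EMULATE_FAIL;
 *	... decode inst and emulate ...
 */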