/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS TLB handling, this file is part of the Linux host kernel so that
 * TLB handlers run from KSEG0
 *
 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */

#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/kvm_host.h>
#include <linux/srcu.h>

#include <asm/cpu.h>
#include <asm/bootinfo.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
#include <asm/tlb.h>
#include <asm/tlbdebug.h>

/*
 * Temporarily hide CONFIG_MIPS_MT so that <asm/r4kcache.h> provides the
 * plain (non-MT) cache op variants (presumably the MT-protected variants
 * are unsuitable in this context).
 */
#undef CONFIG_MIPS_MT
#include <asm/r4kcache.h>
#define CONFIG_MIPS_MT

#define KVM_GUEST_PC_TLB	0
#define KVM_GUEST_SP_TLB	1

#ifdef CONFIG_KVM_MIPS_VZ
unsigned long GUESTID_MASK;
EXPORT_SYMBOL_GPL(GUESTID_MASK);
unsigned long GUESTID_FIRST_VERSION;
EXPORT_SYMBOL_GPL(GUESTID_FIRST_VERSION);
unsigned long GUESTID_VERSION_MASK;
EXPORT_SYMBOL_GPL(GUESTID_VERSION_MASK);

static u32 kvm_mips_get_root_asid(struct kvm_vcpu *vcpu)
{
	struct mm_struct *gpa_mm = &vcpu->kvm->arch.gpa_mm;

	if (cpu_has_guestid)
		return 0;
	else
		return cpu_asid(smp_processor_id(), gpa_mm);
}
#endif

static u32 kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu)
{
	struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;
	int cpu = smp_processor_id();

	return cpu_asid(cpu, kern_mm);
}

static u32 kvm_mips_get_user_asid(struct kvm_vcpu *vcpu)
{
	struct mm_struct *user_mm = &vcpu->arch.guest_user_mm;
	int cpu = smp_processor_id();

	return cpu_asid(cpu, user_mm);
}

void kvm_mips_dump_host_tlbs(void)
{
	unsigned long flags;

	local_irq_save(flags);

	kvm_info("HOST TLBs:\n");
	dump_tlb_regs();
	pr_info("\n");
	dump_tlb_all();

	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(kvm_mips_dump_host_tlbs);
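
/*
 * Example: the ASID helpers above supply the low EntryHi bits; combined
 * with the VPN2 field of a virtual address they form a complete host TLB
 * probe value:
 *
 *	entryhi = (va & VPN2_MASK) | kvm_mips_get_user_asid(vcpu);
 *
 * kvm_mips_host_tlb_inv() below builds its probe values exactly this way.
 */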

void kvm_mips_dump_guest_tlbs(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_mips_tlb tlb;
	int i;

	kvm_info("Guest TLBs:\n");
	kvm_info("Guest EntryHi: %#lx\n", kvm_read_c0_guest_entryhi(cop0));

	for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
		tlb = vcpu->arch.guest_tlb[i];
		kvm_info("TLB%c%3d Hi 0x%08lx ",
			 (tlb.tlb_lo[0] | tlb.tlb_lo[1]) & ENTRYLO_V
							? ' ' : '*',
			 i, tlb.tlb_hi);
		kvm_info("Lo0=0x%09llx %c%c attr %lx ",
			 (u64) mips3_tlbpfn_to_paddr(tlb.tlb_lo[0]),
			 (tlb.tlb_lo[0] & ENTRYLO_D) ? 'D' : ' ',
			 (tlb.tlb_lo[0] & ENTRYLO_G) ? 'G' : ' ',
			 (tlb.tlb_lo[0] & ENTRYLO_C) >> ENTRYLO_C_SHIFT);
		kvm_info("Lo1=0x%09llx %c%c attr %lx sz=%lx\n",
			 (u64) mips3_tlbpfn_to_paddr(tlb.tlb_lo[1]),
			 (tlb.tlb_lo[1] & ENTRYLO_D) ? 'D' : ' ',
			 (tlb.tlb_lo[1] & ENTRYLO_G) ? 'G' : ' ',
			 (tlb.tlb_lo[1] & ENTRYLO_C) >> ENTRYLO_C_SHIFT,
			 tlb.tlb_mask);
	}
}
EXPORT_SYMBOL_GPL(kvm_mips_dump_guest_tlbs);

int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long entryhi)
{
	int i;
	int index = -1;
	struct kvm_mips_tlb *tlb = vcpu->arch.guest_tlb;

	for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
		if (TLB_HI_VPN2_HIT(tlb[i], entryhi) &&
		    TLB_HI_ASID_HIT(tlb[i], entryhi)) {
			index = i;
			break;
		}
	}

	/*
	 * Only dereference the TLB array on a hit; on a miss, i ==
	 * KVM_MIPS_GUEST_TLB_SIZE and tlb[i] would read past the array.
	 */
	if (index >= 0)
		kvm_debug("%s: entryhi: %#lx, index: %d lo0: %#lx, lo1: %#lx\n",
			  __func__, entryhi, index,
			  tlb[index].tlb_lo[0], tlb[index].tlb_lo[1]);
	else
		kvm_debug("%s: entryhi: %#lx, index: %d\n",
			  __func__, entryhi, index);

	return index;
}
EXPORT_SYMBOL_GPL(kvm_mips_guest_tlb_lookup);

static int _kvm_mips_host_tlb_inv(unsigned long entryhi)
{
	int idx;

	write_c0_entryhi(entryhi);
	mtc0_tlbw_hazard();

	tlb_probe();
	tlb_probe_hazard();
	idx = read_c0_index();

	BUG_ON(idx >= current_cpu_data.tlbsize);

	if (idx >= 0) {
		write_c0_entryhi(UNIQUE_ENTRYHI(idx));
		write_c0_entrylo0(0);
		write_c0_entrylo1(0);
		mtc0_tlbw_hazard();

		tlb_write_indexed();
		tlbw_use_hazard();
	}

	return idx;
}

int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long va,
			  bool user, bool kernel)
{
	/*
	 * Initialize idx_user and idx_kernel to work around a bogus
	 * maybe-uninitialized warning when using GCC 6.
	 */
	int idx_user = 0, idx_kernel = 0;
	unsigned long flags, old_entryhi;

	local_irq_save(flags);

	old_entryhi = read_c0_entryhi();

	if (user)
		idx_user = _kvm_mips_host_tlb_inv((va & VPN2_MASK) |
						  kvm_mips_get_user_asid(vcpu));
	if (kernel)
		idx_kernel = _kvm_mips_host_tlb_inv((va & VPN2_MASK) |
						kvm_mips_get_kernel_asid(vcpu));

	write_c0_entryhi(old_entryhi);
	mtc0_tlbw_hazard();

	local_irq_restore(flags);

	/*
	 * We don't want to get reserved instruction exceptions for missing tlb
	 * entries.
	 */
	if (cpu_has_vtag_icache)
		flush_icache_all();

	if (user && idx_user >= 0)
		kvm_debug("%s: Invalidated guest user entryhi %#lx @ idx %d\n",
			  __func__, (va & VPN2_MASK) |
				    kvm_mips_get_user_asid(vcpu), idx_user);
	if (kernel && idx_kernel >= 0)
		kvm_debug("%s: Invalidated guest kernel entryhi %#lx @ idx %d\n",
			  __func__, (va & VPN2_MASK) |
				    kvm_mips_get_kernel_asid(vcpu), idx_kernel);

	return 0;
}
EXPORT_SYMBOL_GPL(kvm_mips_host_tlb_inv);
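
/*
 * Example (hypothetical caller): flush any host TLB mapping of a faulting
 * guest virtual address from both the guest kernel and guest user ASIDs:
 *
 *	kvm_mips_host_tlb_inv(vcpu, badvaddr, true, true);
 *
 * Passing false for either flag skips the corresponding ASID probe.
 */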

#ifdef CONFIG_KVM_MIPS_VZ

/* GuestID management */

/**
 * clear_root_gid() - Set GuestCtl1.RID to 0 for normal root operation.
 */
static inline void clear_root_gid(void)
{
	if (cpu_has_guestid) {
		clear_c0_guestctl1(MIPS_GCTL1_RID);
		mtc0_tlbw_hazard();
	}
}

/**
 * set_root_gid_to_guest_gid() - Set GuestCtl1.RID to match GuestCtl1.ID.
 *
 * Sets the root GuestID to match the current guest GuestID, for TLB operation
 * on the GPA->RPA mappings in the root TLB.
 *
 * The caller must be sure to disable HTW while the root GID is set, and
 * possibly longer if TLB registers are modified.
 */
static inline void set_root_gid_to_guest_gid(void)
{
	unsigned int guestctl1;

	if (cpu_has_guestid) {
		back_to_back_c0_hazard();
		guestctl1 = read_c0_guestctl1();
		guestctl1 = (guestctl1 & ~MIPS_GCTL1_RID) |
			((guestctl1 & MIPS_GCTL1_ID) >> MIPS_GCTL1_ID_SHIFT)
						 << MIPS_GCTL1_RID_SHIFT;
		write_c0_guestctl1(guestctl1);
		mtc0_tlbw_hazard();
	}
}

int kvm_vz_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long va)
{
	int idx;
	unsigned long flags, old_entryhi;

	local_irq_save(flags);
	htw_stop();

	/* Set root GuestID for root probe and write of guest TLB entry */
	set_root_gid_to_guest_gid();

	old_entryhi = read_c0_entryhi();

	idx = _kvm_mips_host_tlb_inv((va & VPN2_MASK) |
				     kvm_mips_get_root_asid(vcpu));

	write_c0_entryhi(old_entryhi);
	clear_root_gid();
	mtc0_tlbw_hazard();

	htw_start();
	local_irq_restore(flags);

	/*
	 * We don't want to get reserved instruction exceptions for missing tlb
	 * entries.
	 */
	if (cpu_has_vtag_icache)
		flush_icache_all();

	/* Index 0 is a valid TLB slot, so report invalidations at idx >= 0 */
	if (idx >= 0)
		kvm_debug("%s: Invalidated root entryhi %#lx @ idx %d\n",
			  __func__, (va & VPN2_MASK) |
				    kvm_mips_get_root_asid(vcpu), idx);

	return 0;
}
EXPORT_SYMBOL_GPL(kvm_vz_host_tlb_inv);
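
/*
 * Worked example for set_root_gid_to_guest_gid() above: with the active
 * GuestID (GuestCtl1.ID) equal to, say, 5, the update reduces to:
 *
 *	rid = (guestctl1 & MIPS_GCTL1_ID) >> MIPS_GCTL1_ID_SHIFT;  (= 5)
 *	guestctl1 = (guestctl1 & ~MIPS_GCTL1_RID) |
 *		    (rid << MIPS_GCTL1_RID_SHIFT);
 *
 * so root TLB probes and writes are tagged with GuestID 5 and match the
 * guest's GPA->RPA entries, until clear_root_gid() restores RID to 0.
 */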

/**
 * kvm_vz_guest_tlb_lookup() - Lookup a guest VZ TLB mapping.
 * @vcpu:	KVM VCPU pointer.
 * @gva:	Guest virtual address in a TLB mapped guest segment.
 * @gpa:	Pointer to output guest physical address it maps to.
 *
 * Converts a guest virtual address in a guest TLB mapped segment to a guest
 * physical address, by probing the guest TLB.
 *
 * Returns:	0 if guest TLB mapping exists for @gva. *@gpa will have been
 *		written.
 *		-EFAULT if no guest TLB mapping exists for @gva. *@gpa may not
 *		have been written.
 */
int kvm_vz_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long gva,
			    unsigned long *gpa)
{
	unsigned long o_entryhi, o_entrylo[2], o_pagemask;
	unsigned int o_index;
	unsigned long entrylo[2], pagemask, pagemaskbit, pa;
	unsigned long flags;
	int index;

	/* Probe the guest TLB for a mapping */
	local_irq_save(flags);
	/* Set root GuestID for root probe of guest TLB entry */
	htw_stop();
	set_root_gid_to_guest_gid();

	o_entryhi = read_gc0_entryhi();
	o_index = read_gc0_index();

	write_gc0_entryhi((o_entryhi & 0x3ff) | (gva & ~0xfffl));
	mtc0_tlbw_hazard();
	guest_tlb_probe();
	tlb_probe_hazard();

	index = read_gc0_index();
	if (index < 0) {
		/* No match, fail */
		write_gc0_entryhi(o_entryhi);
		write_gc0_index(o_index);

		clear_root_gid();
		htw_start();
		local_irq_restore(flags);
		return -EFAULT;
	}

	/* Match! read the TLB entry */
	o_entrylo[0] = read_gc0_entrylo0();
	o_entrylo[1] = read_gc0_entrylo1();
	o_pagemask = read_gc0_pagemask();

	mtc0_tlbr_hazard();
	guest_tlb_read();
	tlb_read_hazard();

	entrylo[0] = read_gc0_entrylo0();
	entrylo[1] = read_gc0_entrylo1();
	pagemask = ~read_gc0_pagemask() & ~0x1fffl;

	write_gc0_entryhi(o_entryhi);
	write_gc0_index(o_index);
	write_gc0_entrylo0(o_entrylo[0]);
	write_gc0_entrylo1(o_entrylo[1]);
	write_gc0_pagemask(o_pagemask);

	clear_root_gid();
	htw_start();
	local_irq_restore(flags);

	/* Select one of the EntryLo values and interpret the GPA */
	pagemaskbit = (pagemask ^ (pagemask & (pagemask - 1))) >> 1;
	pa = entrylo[!!(gva & pagemaskbit)];

	/*
	 * TLB entry may have become invalid since TLB probe if physical FTLB
	 * entries are shared between threads (e.g. I6400).
	 */
	if (!(pa & ENTRYLO_V))
		return -EFAULT;

	/*
	 * Note, this doesn't take guest MIPS32 XPA into account, where PFN is
	 * split with XI/RI in the middle.
	 */
	pa = (pa << 6) & ~0xfffl;
	pa |= gva & ~(pagemask | pagemaskbit);

	*gpa = pa;
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_vz_guest_tlb_lookup);
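
/*
 * Worked example for the EntryLo selection in kvm_vz_guest_tlb_lookup(),
 * assuming 4 KiB pages (guest PageMask == 0), shown on 32 bits for
 * brevity:
 *
 *	pagemask    = ~0 & ~0x1fff                           = 0xffffe000
 *	lowest bit  = pagemask ^ (pagemask & (pagemask - 1)) = 0x2000
 *	pagemaskbit = 0x2000 >> 1                            = 0x1000
 *
 * Bit 12 of the GVA then selects EntryLo0 (even page) or EntryLo1 (odd
 * page), and gva & ~(pagemask | pagemaskbit) == gva & 0xfff preserves the
 * byte offset within the 4 KiB page.
 */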

/**
 * kvm_vz_local_flush_roottlb_all_guests() - Flush all root TLB entries for
 * guests.
 *
 * Invalidate all entries in root tlb which are GPA mappings.
 */
void kvm_vz_local_flush_roottlb_all_guests(void)
{
	unsigned long flags;
	unsigned long old_entryhi, old_pagemask, old_guestctl1;
	int entry;

	if (WARN_ON(!cpu_has_guestid))
		return;

	local_irq_save(flags);
	htw_stop();

	/* TLBR may clobber EntryHi.ASID, PageMask, and GuestCtl1.RID */
	old_entryhi = read_c0_entryhi();
	old_pagemask = read_c0_pagemask();
	old_guestctl1 = read_c0_guestctl1();

	/*
	 * Invalidate guest entries in root TLB while leaving root entries
	 * intact when possible.
	 */
	for (entry = 0; entry < current_cpu_data.tlbsize; entry++) {
		write_c0_index(entry);
		mtc0_tlbw_hazard();
		tlb_read();
		tlb_read_hazard();

		/* Don't invalidate non-guest (RVA) mappings in the root TLB */
		if (!(read_c0_guestctl1() & MIPS_GCTL1_RID))
			continue;

		/* Make sure all entries differ. */
		write_c0_entryhi(UNIQUE_ENTRYHI(entry));
		write_c0_entrylo0(0);
		write_c0_entrylo1(0);
		write_c0_guestctl1(0);
		mtc0_tlbw_hazard();
		tlb_write_indexed();
	}

	write_c0_entryhi(old_entryhi);
	write_c0_pagemask(old_pagemask);
	write_c0_guestctl1(old_guestctl1);
	tlbw_use_hazard();

	htw_start();
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(kvm_vz_local_flush_roottlb_all_guests);

/**
 * kvm_vz_local_flush_guesttlb_all() - Flush all guest TLB entries.
 *
 * Invalidate all entries in guest tlb irrespective of guestid.
 */
void kvm_vz_local_flush_guesttlb_all(void)
{
	unsigned long flags;
	unsigned long old_index;
	unsigned long old_entryhi;
	unsigned long old_entrylo[2];
	unsigned long old_pagemask;
	int entry;
	u64 cvmmemctl2 = 0;

	local_irq_save(flags);

	/* Preserve all clobbered guest registers */
	old_index = read_gc0_index();
	old_entryhi = read_gc0_entryhi();
	old_entrylo[0] = read_gc0_entrylo0();
	old_entrylo[1] = read_gc0_entrylo1();
	old_pagemask = read_gc0_pagemask();

	switch (current_cpu_type()) {
	case CPU_CAVIUM_OCTEON3:
		/* Inhibit machine check due to multiple matching TLB entries */
		cvmmemctl2 = read_c0_cvmmemctl2();
		cvmmemctl2 |= CVMMEMCTL2_INHIBITTS;
		write_c0_cvmmemctl2(cvmmemctl2);
		break;
	}

	/* Invalidate guest entries in guest TLB */
	write_gc0_entrylo0(0);
	write_gc0_entrylo1(0);
	write_gc0_pagemask(0);
	for (entry = 0; entry < current_cpu_data.guest.tlbsize; entry++) {
		/* Make sure all entries differ. */
		write_gc0_index(entry);
		write_gc0_entryhi(UNIQUE_GUEST_ENTRYHI(entry));
		mtc0_tlbw_hazard();
		guest_tlb_write_indexed();
	}

	if (cvmmemctl2) {
		cvmmemctl2 &= ~CVMMEMCTL2_INHIBITTS;
		write_c0_cvmmemctl2(cvmmemctl2);
	}

	write_gc0_index(old_index);
	write_gc0_entryhi(old_entryhi);
	write_gc0_entrylo0(old_entrylo[0]);
	write_gc0_entrylo1(old_entrylo[1]);
	write_gc0_pagemask(old_pagemask);
	tlbw_use_hazard();

	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(kvm_vz_local_flush_guesttlb_all);
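
/*
 * Note on the "Make sure all entries differ" steps above: invalidated
 * lines are written with a per-index EntryHi (UNIQUE_ENTRYHI /
 * UNIQUE_GUEST_ENTRYHI) rather than all zeroes, so that no two TLB
 * entries can ever match the same address, which on some cores raises a
 * machine check (cf. the Octeon III INHIBITTS workaround above). The
 * idiom is:
 *
 *	write_gc0_index(entry);
 *	write_gc0_entryhi(UNIQUE_GUEST_ENTRYHI(entry));
 *	mtc0_tlbw_hazard();
 *	guest_tlb_write_indexed();
 */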

/**
 * kvm_vz_save_guesttlb() - Save a range of guest TLB entries.
 * @buf:	Buffer to write TLB entries into.
 * @index:	Start index.
 * @count:	Number of entries to save.
 *
 * Save a range of guest TLB entries. The caller must ensure interrupts are
 * disabled.
 */
void kvm_vz_save_guesttlb(struct kvm_mips_tlb *buf, unsigned int index,
			  unsigned int count)
{
	unsigned int end = index + count;
	unsigned long old_entryhi, old_entrylo0, old_entrylo1, old_pagemask;
	unsigned int guestctl1 = 0;
	int old_index, i;

	/* Save registers we're about to clobber */
	old_index = read_gc0_index();
	old_entryhi = read_gc0_entryhi();
	old_entrylo0 = read_gc0_entrylo0();
	old_entrylo1 = read_gc0_entrylo1();
	old_pagemask = read_gc0_pagemask();

	/* Set root GuestID for root probe */
	htw_stop();
	set_root_gid_to_guest_gid();
	if (cpu_has_guestid)
		guestctl1 = read_c0_guestctl1();

	/* Read each entry from guest TLB */
	for (i = index; i < end; ++i, ++buf) {
		write_gc0_index(i);

		mtc0_tlbr_hazard();
		guest_tlb_read();
		tlb_read_hazard();

		if (cpu_has_guestid &&
		    (read_c0_guestctl1() ^ guestctl1) & MIPS_GCTL1_RID) {
			/* Entry invalid or belongs to another guest */
			buf->tlb_hi = UNIQUE_GUEST_ENTRYHI(i);
			buf->tlb_lo[0] = 0;
			buf->tlb_lo[1] = 0;
			buf->tlb_mask = 0;
		} else {
			/* Entry belongs to the right guest */
			buf->tlb_hi = read_gc0_entryhi();
			buf->tlb_lo[0] = read_gc0_entrylo0();
			buf->tlb_lo[1] = read_gc0_entrylo1();
			buf->tlb_mask = read_gc0_pagemask();
		}
	}

	/* Clear root GuestID again */
	clear_root_gid();
	htw_start();

	/* Restore clobbered registers */
	write_gc0_index(old_index);
	write_gc0_entryhi(old_entryhi);
	write_gc0_entrylo0(old_entrylo0);
	write_gc0_entrylo1(old_entrylo1);
	write_gc0_pagemask(old_pagemask);

	tlbw_use_hazard();
}
EXPORT_SYMBOL_GPL(kvm_vz_save_guesttlb);

/**
 * kvm_vz_load_guesttlb() - Load a range of guest TLB entries.
 * @buf:	Buffer to read TLB entries from.
 * @index:	Start index.
 * @count:	Number of entries to load.
 *
 * Load a range of guest TLB entries. The caller must ensure interrupts are
 * disabled.
 */
void kvm_vz_load_guesttlb(const struct kvm_mips_tlb *buf, unsigned int index,
			  unsigned int count)
{
	unsigned int end = index + count;
	unsigned long old_entryhi, old_entrylo0, old_entrylo1, old_pagemask;
	int old_index, i;

	/* Save registers we're about to clobber */
	old_index = read_gc0_index();
	old_entryhi = read_gc0_entryhi();
	old_entrylo0 = read_gc0_entrylo0();
	old_entrylo1 = read_gc0_entrylo1();
	old_pagemask = read_gc0_pagemask();

	/* Set root GuestID for root probe */
	htw_stop();
	set_root_gid_to_guest_gid();

	/* Write each entry to guest TLB */
	for (i = index; i < end; ++i, ++buf) {
		write_gc0_index(i);
		write_gc0_entryhi(buf->tlb_hi);
		write_gc0_entrylo0(buf->tlb_lo[0]);
		write_gc0_entrylo1(buf->tlb_lo[1]);
		write_gc0_pagemask(buf->tlb_mask);

		mtc0_tlbw_hazard();
		guest_tlb_write_indexed();
	}

	/* Clear root GuestID again */
	clear_root_gid();
	htw_start();

	/* Restore clobbered registers */
	write_gc0_index(old_index);
	write_gc0_entryhi(old_entryhi);
	write_gc0_entrylo0(old_entrylo0);
	write_gc0_entrylo1(old_entrylo1);
	write_gc0_pagemask(old_pagemask);

	tlbw_use_hazard();
}
EXPORT_SYMBOL_GPL(kvm_vz_load_guesttlb);
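
/*
 * Usage sketch (hypothetical; interrupts must already be disabled, as
 * required by both helpers): saving and later restoring a range of guest
 * TLB entries, e.g. around descheduling a VCPU:
 *
 *	struct kvm_mips_tlb buf[8];	(hypothetical buffer size)
 *
 *	kvm_vz_save_guesttlb(buf, 0, ARRAY_SIZE(buf));
 *	...
 *	kvm_vz_load_guesttlb(buf, 0, ARRAY_SIZE(buf));
 */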

#endif

/**
 * kvm_mips_suspend_mm() - Suspend the active mm.
 * @cpu:	The CPU we're running on.
 *
 * Suspend the active_mm, ready for a switch to a KVM guest virtual address
 * space. This is left active for the duration of guest context, including time
 * with interrupts enabled, so we need to be careful not to confuse e.g. cache
 * management IPIs.
 *
 * kvm_mips_resume_mm() should be called before context switching to a
 * different process so we don't need to worry about reference counting.
 *
 * This needs to be in static kernel code to avoid exporting init_mm.
 */
void kvm_mips_suspend_mm(int cpu)
{
	cpumask_clear_cpu(cpu, mm_cpumask(current->active_mm));
	current->active_mm = &init_mm;
}
EXPORT_SYMBOL_GPL(kvm_mips_suspend_mm);

/**
 * kvm_mips_resume_mm() - Resume the current process mm.
 * @cpu:	The CPU we're running on.
 *
 * Resume the mm of the current process, after a switch back from a KVM guest
 * virtual address space (see kvm_mips_suspend_mm()).
 */
void kvm_mips_resume_mm(int cpu)
{
	cpumask_set_cpu(cpu, mm_cpumask(current->mm));
	current->active_mm = current->mm;
}
EXPORT_SYMBOL_GPL(kvm_mips_resume_mm);
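
/*
 * Usage sketch (hypothetical): the two helpers above bracket time spent in
 * guest context on the local CPU:
 *
 *	int cpu = smp_processor_id();
 *
 *	kvm_mips_suspend_mm(cpu);	(active_mm becomes init_mm)
 *	... enter guest / touch guest mappings ...
 *	kvm_mips_resume_mm(cpu);	(active_mm restored to current->mm)
 */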