/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS TLB handling, this file is part of the Linux host kernel so that
 * TLB handlers run from KSEG0
 *
 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */

#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/kvm_host.h>
#include <linux/srcu.h>

#include <asm/cpu.h>
#include <asm/bootinfo.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/tlb.h>
#include <asm/tlbdebug.h>

/* Hide CONFIG_MIPS_MT so that r4kcache.h provides the plain (non-MT) ops. */
#undef CONFIG_MIPS_MT
#include <asm/r4kcache.h>
#define CONFIG_MIPS_MT

#define KVM_GUEST_PC_TLB	0
#define KVM_GUEST_SP_TLB	1

/* Host ASID of the guest kernel address space on this CPU. */
static u32 kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu)
{
	struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;
	int cpu = smp_processor_id();

	return cpu_asid(cpu, kern_mm);
}

/* Host ASID of the guest user address space on this CPU. */
static u32 kvm_mips_get_user_asid(struct kvm_vcpu *vcpu)
{
	struct mm_struct *user_mm = &vcpu->arch.guest_user_mm;
	int cpu = smp_processor_id();

	return cpu_asid(cpu, user_mm);
}

/* Dump the host TLB registers and all host TLB entries. */
void kvm_mips_dump_host_tlbs(void)
{
	unsigned long flags;

	local_irq_save(flags);

	kvm_info("HOST TLBs:\n");
	dump_tlb_regs();
	pr_info("\n");
	dump_tlb_all();

	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(kvm_mips_dump_host_tlbs);

/* Dump the guest's software-managed TLB, one entry per line. */
void kvm_mips_dump_guest_tlbs(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_mips_tlb tlb;
	int i;

	kvm_info("Guest TLBs:\n");
	kvm_info("Guest EntryHi: %#lx\n", kvm_read_c0_guest_entryhi(cop0));

	for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
		tlb = vcpu->arch.guest_tlb[i];
		kvm_info("TLB%c%3d Hi 0x%08lx ",
			 (tlb.tlb_lo[0] | tlb.tlb_lo[1]) & ENTRYLO_V
							? ' ' : '*',
			 i, tlb.tlb_hi);
		kvm_info("Lo0=0x%09llx %c%c attr %lx ",
			 (u64) mips3_tlbpfn_to_paddr(tlb.tlb_lo[0]),
			 (tlb.tlb_lo[0] & ENTRYLO_D) ? 'D' : ' ',
			 (tlb.tlb_lo[0] & ENTRYLO_G) ? 'G' : ' ',
			 (tlb.tlb_lo[0] & ENTRYLO_C) >> ENTRYLO_C_SHIFT);
		kvm_info("Lo1=0x%09llx %c%c attr %lx sz=%lx\n",
			 (u64) mips3_tlbpfn_to_paddr(tlb.tlb_lo[1]),
			 (tlb.tlb_lo[1] & ENTRYLO_D) ? 'D' : ' ',
			 (tlb.tlb_lo[1] & ENTRYLO_G) ? 'G' : ' ',
			 (tlb.tlb_lo[1] & ENTRYLO_C) >> ENTRYLO_C_SHIFT,
			 tlb.tlb_mask);
	}
}
EXPORT_SYMBOL_GPL(kvm_mips_dump_guest_tlbs);

/*
 * Search the guest TLB for an entry matching @entryhi (VPN2 + ASID).
 * Returns the matching index, or -1 if no entry matches.
 */
int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long entryhi)
{
	int i;
	int index = -1;
	struct kvm_mips_tlb *tlb = vcpu->arch.guest_tlb;

	for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
		if (TLB_HI_VPN2_HIT(tlb[i], entryhi) &&
		    TLB_HI_ASID_HIT(tlb[i], entryhi)) {
			index = i;
			break;
		}
	}

	/* Only dereference the entry on a hit; tlb[i] is out of bounds on a miss. */
	if (index >= 0)
		kvm_debug("%s: entryhi: %#lx, index: %d lo0: %#lx, lo1: %#lx\n",
			  __func__, entryhi, index,
			  tlb[index].tlb_lo[0], tlb[index].tlb_lo[1]);
	else
		kvm_debug("%s: entryhi: %#lx, index: %d\n",
			  __func__, entryhi, index);

	return index;
}
EXPORT_SYMBOL_GPL(kvm_mips_guest_tlb_lookup);
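
/*
 * Illustrative sketch (not part of this file): callers such as the guest
 * TLB miss handlers typically build the lookup key from the faulting guest
 * virtual address plus the current guest ASID held in the guest CP0
 * EntryHi. The variables "badvaddr" and "cop0" below are hypothetical.
 *
 *	unsigned long entryhi = (badvaddr & VPN2_MASK) |
 *				(kvm_read_c0_guest_entryhi(cop0) &
 *				 KVM_ENTRYHI_ASID);
 *	int index = kvm_mips_guest_tlb_lookup(vcpu, entryhi);
 *
 *	if (index < 0) {
 *		// No matching guest TLB entry: deliver a TLB refill
 *		// exception to the guest.
 *	}
 */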
/*
 * Probe the host TLB for @entryhi and, if present, invalidate the matching
 * entry by overwriting it with a unique, unused EntryHi value. Returns the
 * probed index (negative if there was no match). The caller must disable
 * interrupts and preserve the old EntryHi value.
 */
static int _kvm_mips_host_tlb_inv(unsigned long entryhi)
{
	int idx;

	write_c0_entryhi(entryhi);
	mtc0_tlbw_hazard();

	tlb_probe();
	tlb_probe_hazard();
	idx = read_c0_index();

	BUG_ON(idx >= current_cpu_data.tlbsize);

	if (idx >= 0) {
		write_c0_entryhi(UNIQUE_ENTRYHI(idx));
		write_c0_entrylo0(0);
		write_c0_entrylo1(0);
		mtc0_tlbw_hazard();

		tlb_write_indexed();
		tlbw_use_hazard();
	}

	return idx;
}

/*
 * Invalidate any host TLB entry mapping @va for the guest, in the guest
 * user and/or guest kernel ASID as requested.
 */
int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long va,
			  bool user, bool kernel)
{
	int idx_user, idx_kernel;
	unsigned long flags, old_entryhi;

	local_irq_save(flags);

	old_entryhi = read_c0_entryhi();

	if (user)
		idx_user = _kvm_mips_host_tlb_inv((va & VPN2_MASK) |
						  kvm_mips_get_user_asid(vcpu));
	if (kernel)
		idx_kernel = _kvm_mips_host_tlb_inv((va & VPN2_MASK) |
						kvm_mips_get_kernel_asid(vcpu));

	write_c0_entryhi(old_entryhi);
	mtc0_tlbw_hazard();

	local_irq_restore(flags);

	if (user && idx_user >= 0)
		kvm_debug("%s: Invalidated guest user entryhi %#lx @ idx %d\n",
			  __func__, (va & VPN2_MASK) |
				    kvm_mips_get_user_asid(vcpu), idx_user);
	if (kernel && idx_kernel >= 0)
		kvm_debug("%s: Invalidated guest kernel entryhi %#lx @ idx %d\n",
			  __func__, (va & VPN2_MASK) |
				    kvm_mips_get_kernel_asid(vcpu), idx_kernel);

	return 0;
}
EXPORT_SYMBOL_GPL(kvm_mips_host_tlb_inv);

/**
 * kvm_mips_suspend_mm() - Suspend the active mm.
 * @cpu:	The CPU we're running on.
 *
 * Suspend the active_mm, ready for a switch to a KVM guest virtual address
 * space. This is left active for the duration of guest context, including time
 * with interrupts enabled, so we need to be careful not to confuse e.g. cache
 * management IPIs.
 *
 * kvm_mips_resume_mm() should be called before context switching to a
 * different process so we don't need to worry about reference counting.
 *
 * This needs to be in static kernel code to avoid exporting init_mm.
 */
void kvm_mips_suspend_mm(int cpu)
{
	cpumask_clear_cpu(cpu, mm_cpumask(current->active_mm));
	current->active_mm = &init_mm;
}
EXPORT_SYMBOL_GPL(kvm_mips_suspend_mm);

/**
 * kvm_mips_resume_mm() - Resume the current process mm.
 * @cpu:	The CPU we're running on.
 *
 * Resume the mm of the current process, after a switch back from a KVM guest
 * virtual address space (see kvm_mips_suspend_mm()).
 */
void kvm_mips_resume_mm(int cpu)
{
	cpumask_set_cpu(cpu, mm_cpumask(current->mm));
	current->active_mm = current->mm;
}
EXPORT_SYMBOL_GPL(kvm_mips_resume_mm);
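
/*
 * Illustrative sketch (not part of this file): the suspend/resume pair is
 * expected to bracket time spent in the guest address space on the current
 * CPU, along the lines of the hypothetical flow below (with preemption
 * already disabled so smp_processor_id() is stable):
 *
 *	int cpu = smp_processor_id();
 *
 *	kvm_mips_suspend_mm(cpu);	// detach active_mm, adopt init_mm
 *	// ... run in guest context, possibly with interrupts enabled ...
 *	kvm_mips_resume_mm(cpu);	// reattach current->mm before any
 *					// context switch to another process
 */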