/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS TLB handling, this file is part of the Linux host kernel so that
 * TLB handlers run from KSEG0
 *
 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */

#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/kvm_host.h>
#include <linux/srcu.h>

#include <asm/cpu.h>
#include <asm/bootinfo.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/tlb.h>
#include <asm/tlbdebug.h>

#undef CONFIG_MIPS_MT
#include <asm/r4kcache.h>
#define CONFIG_MIPS_MT

#define KVM_GUEST_PC_TLB	0
#define KVM_GUEST_SP_TLB	1

atomic_t kvm_mips_instance;
EXPORT_SYMBOL_GPL(kvm_mips_instance);

static u32 kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu)
{
	int cpu = smp_processor_id();

	return vcpu->arch.guest_kernel_asid[cpu] &
			cpu_asid_mask(&cpu_data[cpu]);
}

static u32 kvm_mips_get_user_asid(struct kvm_vcpu *vcpu)
{
	int cpu = smp_processor_id();

	return vcpu->arch.guest_user_asid[cpu] &
			cpu_asid_mask(&cpu_data[cpu]);
}

inline u32 kvm_mips_get_commpage_asid(struct kvm_vcpu *vcpu)
{
	return vcpu->kvm->arch.commpage_tlb;
}

void kvm_mips_dump_host_tlbs(void)
{
	unsigned long flags;

	local_irq_save(flags);

	kvm_info("HOST TLBs:\n");
	dump_tlb_regs();
	pr_info("\n");
	dump_tlb_all();

	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(kvm_mips_dump_host_tlbs);

void kvm_mips_dump_guest_tlbs(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_mips_tlb tlb;
	int i;

	kvm_info("Guest TLBs:\n");
	kvm_info("Guest EntryHi: %#lx\n", kvm_read_c0_guest_entryhi(cop0));

	for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
		tlb = vcpu->arch.guest_tlb[i];
		kvm_info("TLB%c%3d Hi 0x%08lx ",
			 (tlb.tlb_lo[0] | tlb.tlb_lo[1]) & ENTRYLO_V
							? ' ' : '*',
			 i, tlb.tlb_hi);
		kvm_info("Lo0=0x%09llx %c%c attr %lx ",
			 (u64) mips3_tlbpfn_to_paddr(tlb.tlb_lo[0]),
			 (tlb.tlb_lo[0] & ENTRYLO_D) ? 'D' : ' ',
			 (tlb.tlb_lo[0] & ENTRYLO_G) ? 'G' : ' ',
			 (tlb.tlb_lo[0] & ENTRYLO_C) >> ENTRYLO_C_SHIFT);
		kvm_info("Lo1=0x%09llx %c%c attr %lx sz=%lx\n",
			 (u64) mips3_tlbpfn_to_paddr(tlb.tlb_lo[1]),
			 (tlb.tlb_lo[1] & ENTRYLO_D) ? 'D' : ' ',
			 (tlb.tlb_lo[1] & ENTRYLO_G) ? 'G' : ' ',
			 (tlb.tlb_lo[1] & ENTRYLO_C) >> ENTRYLO_C_SHIFT,
			 tlb.tlb_mask);
	}
}
EXPORT_SYMBOL_GPL(kvm_mips_dump_guest_tlbs);
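
/**
 * kvm_mips_host_tlb_write() - Write a guest mapping into the host TLB.
 * @vcpu:		VCPU the mapping is written on behalf of.
 * @entryhi:		Host EntryHi (VPN2 and host ASID) to probe and write.
 * @entrylo0:		Host EntryLo0 for the even page of the pair.
 * @entrylo1:		Host EntryLo1 for the odd page of the pair.
 * @flush_dcache_mask:	Page mask for the D-cache flush, or 0 to skip it.
 *
 * Probes the host TLB for @entryhi; on a hit the matching entry is
 * overwritten in place (indexed write), otherwise a new entry is written
 * at a random index. Valid pages of the pair then have their D-cache
 * lines flushed if @flush_dcache_mask is non-zero.
 *
 * Returns:	0 on success.
 *		-1 if the probe reports an index beyond the host TLB size.
 */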
/* XXXKYMA: Must be called with interrupts disabled */
/* set flush_dcache_mask == 0 if no dcache flush required */
int kvm_mips_host_tlb_write(struct kvm_vcpu *vcpu, unsigned long entryhi,
			    unsigned long entrylo0, unsigned long entrylo1,
			    int flush_dcache_mask)
{
	unsigned long flags;
	unsigned long old_entryhi;
	int idx;

	local_irq_save(flags);

	old_entryhi = read_c0_entryhi();
	write_c0_entryhi(entryhi);
	mtc0_tlbw_hazard();

	tlb_probe();
	tlb_probe_hazard();
	idx = read_c0_index();

	/* Valid indices are 0..tlbsize-1; anything at or above is bogus */
	if (idx >= current_cpu_data.tlbsize) {
		kvm_err("%s: Invalid Index: %d\n", __func__, idx);
		kvm_mips_dump_host_tlbs();
		local_irq_restore(flags);
		return -1;
	}

	write_c0_entrylo0(entrylo0);
	write_c0_entrylo1(entrylo1);
	mtc0_tlbw_hazard();

	if (idx < 0)
		tlb_write_random();
	else
		tlb_write_indexed();
	tlbw_use_hazard();

	kvm_debug("@ %#lx idx: %2d [entryhi(R): %#lx] entrylo0(R): 0x%08lx, entrylo1(R): 0x%08lx\n",
		  vcpu->arch.pc, idx, read_c0_entryhi(),
		  read_c0_entrylo0(), read_c0_entrylo1());

	/* Flush D-cache */
	if (flush_dcache_mask) {
		if (entrylo0 & ENTRYLO_V) {
			++vcpu->stat.flush_dcache_exits;
			flush_data_cache_page((entryhi & VPN2_MASK) &
					      ~flush_dcache_mask);
		}
		if (entrylo1 & ENTRYLO_V) {
			++vcpu->stat.flush_dcache_exits;
			flush_data_cache_page(((entryhi & VPN2_MASK) &
					       ~flush_dcache_mask) |
					      (0x1 << PAGE_SHIFT));
		}
	}

	/* Restore old ASID */
	write_c0_entryhi(old_entryhi);
	mtc0_tlbw_hazard();
	local_irq_restore(flags);
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_mips_host_tlb_write);

int kvm_mips_handle_commpage_tlb_fault(unsigned long badvaddr,
				       struct kvm_vcpu *vcpu)
{
	kvm_pfn_t pfn;
	unsigned long flags, old_entryhi = 0, vaddr = 0;
	unsigned long entrylo[2] = { 0, 0 };
	unsigned int pair_idx;

	pfn = PFN_DOWN(virt_to_phys(vcpu->arch.kseg0_commpage));
	pair_idx = (badvaddr >> PAGE_SHIFT) & 1;
	entrylo[pair_idx] = mips3_paddr_to_tlbpfn(pfn << PAGE_SHIFT) |
		((_page_cachable_default >> _CACHE_SHIFT) << ENTRYLO_C_SHIFT) |
		ENTRYLO_D | ENTRYLO_V;

	local_irq_save(flags);

	old_entryhi = read_c0_entryhi();
	vaddr = badvaddr & (PAGE_MASK << 1);
	write_c0_entryhi(vaddr | kvm_mips_get_kernel_asid(vcpu));
	write_c0_entrylo0(entrylo[0]);
	write_c0_entrylo1(entrylo[1]);
	/* Despite its name, this returns the reserved commpage TLB index */
	write_c0_index(kvm_mips_get_commpage_asid(vcpu));
	mtc0_tlbw_hazard();
	tlb_write_indexed();
	tlbw_use_hazard();

	kvm_debug("@ %#lx idx: %2d [entryhi(R): %#lx] entrylo0(R): 0x%08lx, entrylo1(R): 0x%08lx\n",
		  vcpu->arch.pc, read_c0_index(), read_c0_entryhi(),
		  read_c0_entrylo0(), read_c0_entrylo1());

	/* Restore old ASID */
	write_c0_entryhi(old_entryhi);
	mtc0_tlbw_hazard();
	local_irq_restore(flags);

	return 0;
}
EXPORT_SYMBOL_GPL(kvm_mips_handle_commpage_tlb_fault);
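
/**
 * kvm_mips_guest_tlb_lookup() - Search the guest's software TLB.
 * @vcpu:	VCPU whose guest TLB array is searched.
 * @entryhi:	Guest EntryHi (VPN2 and guest ASID) to match.
 *
 * Linear scan of the KVM_MIPS_GUEST_TLB_SIZE software TLB entries for one
 * whose VPN2 and ASID match @entryhi.
 *
 * Returns:	Index of the first matching entry, or -1 if none matches.
 */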
int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long entryhi)
{
	int i;
	int index = -1;
	struct kvm_mips_tlb *tlb = vcpu->arch.guest_tlb;

	for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
		if (TLB_HI_VPN2_HIT(tlb[i], entryhi) &&
		    TLB_HI_ASID_HIT(tlb[i], entryhi)) {
			index = i;
			break;
		}
	}

	/* Only dereference the array on a hit; i is out of range on a miss */
	if (index >= 0)
		kvm_debug("%s: entryhi: %#lx, index: %d lo0: %#lx, lo1: %#lx\n",
			  __func__, entryhi, index,
			  tlb[index].tlb_lo[0], tlb[index].tlb_lo[1]);
	else
		kvm_debug("%s: entryhi: %#lx, index: %d\n",
			  __func__, entryhi, index);

	return index;
}
EXPORT_SYMBOL_GPL(kvm_mips_guest_tlb_lookup);

int kvm_mips_host_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long vaddr)
{
	unsigned long old_entryhi, flags;
	int idx;

	local_irq_save(flags);

	old_entryhi = read_c0_entryhi();

	if (KVM_GUEST_KERNEL_MODE(vcpu))
		write_c0_entryhi((vaddr & VPN2_MASK) |
				 kvm_mips_get_kernel_asid(vcpu));
	else
		write_c0_entryhi((vaddr & VPN2_MASK) |
				 kvm_mips_get_user_asid(vcpu));

	mtc0_tlbw_hazard();

	tlb_probe();
	tlb_probe_hazard();
	idx = read_c0_index();

	/* Restore old ASID */
	write_c0_entryhi(old_entryhi);
	mtc0_tlbw_hazard();

	local_irq_restore(flags);

	kvm_debug("Host TLB lookup, %#lx, idx: %2d\n", vaddr, idx);

	return idx;
}
EXPORT_SYMBOL_GPL(kvm_mips_host_tlb_lookup);

int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long va)
{
	int idx;
	unsigned long flags, old_entryhi;

	local_irq_save(flags);

	old_entryhi = read_c0_entryhi();

	write_c0_entryhi((va & VPN2_MASK) | kvm_mips_get_user_asid(vcpu));
	mtc0_tlbw_hazard();

	tlb_probe();
	tlb_probe_hazard();
	idx = read_c0_index();

	if (idx >= current_cpu_data.tlbsize)
		BUG();

	/* Index 0 is a valid match; only a negative index means no hit */
	if (idx >= 0) {
		write_c0_entryhi(UNIQUE_ENTRYHI(idx));
		write_c0_entrylo0(0);
		write_c0_entrylo1(0);
		mtc0_tlbw_hazard();

		tlb_write_indexed();
		tlbw_use_hazard();
	}

	write_c0_entryhi(old_entryhi);
	mtc0_tlbw_hazard();

	local_irq_restore(flags);

	if (idx >= 0)
		kvm_debug("%s: Invalidated entryhi %#lx @ idx %d\n", __func__,
			  (va & VPN2_MASK) | kvm_mips_get_user_asid(vcpu), idx);

	return 0;
}
EXPORT_SYMBOL_GPL(kvm_mips_host_tlb_inv);

void kvm_mips_flush_host_tlb(int skip_kseg0)
{
	unsigned long flags;
	unsigned long old_entryhi, entryhi;
	unsigned long old_pagemask;
	int entry = 0;
	int maxentry = current_cpu_data.tlbsize;

	local_irq_save(flags);

	old_entryhi = read_c0_entryhi();
	old_pagemask = read_c0_pagemask();

	/* Blast 'em all away. */
	for (entry = 0; entry < maxentry; entry++) {
		write_c0_index(entry);

		if (skip_kseg0) {
			mtc0_tlbr_hazard();
			tlb_read();
			tlb_read_hazard();

			entryhi = read_c0_entryhi();

			/* Don't blow away guest kernel entries */
			if (KVM_GUEST_KSEGX(entryhi) == KVM_GUEST_KSEG0)
				continue;

			write_c0_pagemask(old_pagemask);
		}

		/* Make sure all entries differ. */
		write_c0_entryhi(UNIQUE_ENTRYHI(entry));
		write_c0_entrylo0(0);
		write_c0_entrylo1(0);
		mtc0_tlbw_hazard();

		tlb_write_indexed();
		tlbw_use_hazard();
	}

	write_c0_entryhi(old_entryhi);
	write_c0_pagemask(old_pagemask);
	mtc0_tlbw_hazard();

	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(kvm_mips_flush_host_tlb);
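
/**
 * kvm_local_flush_tlb_all() - Invalidate every entry in the local host TLB.
 *
 * Overwrites each host TLB entry with a unique unmatchable EntryHi
 * (UNIQUE_ENTRYHI) and zeroed EntryLo values, with interrupts disabled for
 * the duration. Unlike kvm_mips_flush_host_tlb(), there is no option to
 * preserve guest KSEG0 entries.
 */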
void kvm_local_flush_tlb_all(void)
{
	unsigned long flags;
	unsigned long old_ctx;
	int entry = 0;

	local_irq_save(flags);
	/* Save old context and create impossible VPN2 value */
	old_ctx = read_c0_entryhi();
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);

	/* Blast 'em all away. */
	while (entry < current_cpu_data.tlbsize) {
		/* Make sure all entries differ. */
		write_c0_entryhi(UNIQUE_ENTRYHI(entry));
		write_c0_index(entry);
		mtc0_tlbw_hazard();
		tlb_write_indexed();
		tlbw_use_hazard();
		entry++;
	}
	write_c0_entryhi(old_ctx);
	mtc0_tlbw_hazard();

	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(kvm_local_flush_tlb_all);