xref: /openbmc/linux/arch/mips/kvm/tlb.c (revision 95b384f9)
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS TLB handling; this file is part of the Linux host kernel so that
 * the TLB handlers run from KSEG0.
 *
 * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */

#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/kvm_host.h>
#include <linux/srcu.h>

#include <asm/cpu.h>
#include <asm/bootinfo.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/tlb.h>

#undef CONFIG_MIPS_MT
#include <asm/r4kcache.h>
#define CONFIG_MIPS_MT

#define KVM_GUEST_PC_TLB    0
#define KVM_GUEST_SP_TLB    1

#define PRIx64 "llx"
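
/*
 * Editor's note: the kernel provides no <inttypes.h>, so PRIx64 is defined
 * locally above for the printk format strings below. The #undef/#define
 * dance around <asm/r4kcache.h> is in the original source; it appears to
 * select the non-MT variants of the cache routines, though that rationale
 * is our reading of the code, not a documented one.
 */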

atomic_t kvm_mips_instance;
EXPORT_SYMBOL_GPL(kvm_mips_instance);

/* These function pointers are initialized once the KVM module is loaded */
kvm_pfn_t (*kvm_mips_gfn_to_pfn)(struct kvm *kvm, gfn_t gfn);
EXPORT_SYMBOL_GPL(kvm_mips_gfn_to_pfn);

void (*kvm_mips_release_pfn_clean)(kvm_pfn_t pfn);
EXPORT_SYMBOL_GPL(kvm_mips_release_pfn_clean);

bool (*kvm_mips_is_error_pfn)(kvm_pfn_t pfn);
EXPORT_SYMBOL_GPL(kvm_mips_is_error_pfn);

uint32_t kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu)
{
	int cpu = smp_processor_id();

	return vcpu->arch.guest_kernel_asid[cpu] &
			cpu_asid_mask(&cpu_data[cpu]);
}

uint32_t kvm_mips_get_user_asid(struct kvm_vcpu *vcpu)
{
	int cpu = smp_processor_id();

	return vcpu->arch.guest_user_asid[cpu] &
			cpu_asid_mask(&cpu_data[cpu]);
}

inline uint32_t kvm_mips_get_commpage_asid(struct kvm_vcpu *vcpu)
{
	return vcpu->kvm->arch.commpage_tlb;
}
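
/*
 * The two ASID accessors above use smp_processor_id(), so callers must run
 * with preemption (or interrupts) disabled, as the fault handlers below do
 * via preempt_disable() or local_irq_save().
 */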

/* Structure defining a TLB entry data set. */

void kvm_mips_dump_host_tlbs(void)
{
	unsigned long old_entryhi;
	unsigned long old_pagemask;
	struct kvm_mips_tlb tlb;
	unsigned long flags;
	int i;

	local_irq_save(flags);

	old_entryhi = read_c0_entryhi();
	old_pagemask = read_c0_pagemask();

	kvm_info("HOST TLBs:\n");
	kvm_info("ASID: %#lx\n", read_c0_entryhi() &
		 cpu_asid_mask(&current_cpu_data));

	for (i = 0; i < current_cpu_data.tlbsize; i++) {
		write_c0_index(i);
		mtc0_tlbw_hazard();

		tlb_read();
		tlbw_use_hazard();

		tlb.tlb_hi = read_c0_entryhi();
		tlb.tlb_lo0 = read_c0_entrylo0();
		tlb.tlb_lo1 = read_c0_entrylo1();
		tlb.tlb_mask = read_c0_pagemask();

		kvm_info("TLB%c%3d Hi 0x%08lx ",
			 (tlb.tlb_lo0 | tlb.tlb_lo1) & MIPS3_PG_V ? ' ' : '*',
			 i, tlb.tlb_hi);
		kvm_info("Lo0=0x%09" PRIx64 " %c%c attr %lx ",
			 (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo0),
			 (tlb.tlb_lo0 & MIPS3_PG_D) ? 'D' : ' ',
			 (tlb.tlb_lo0 & MIPS3_PG_G) ? 'G' : ' ',
			 (tlb.tlb_lo0 >> 3) & 7);
		kvm_info("Lo1=0x%09" PRIx64 " %c%c attr %lx sz=%lx\n",
			 (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo1),
			 (tlb.tlb_lo1 & MIPS3_PG_D) ? 'D' : ' ',
			 (tlb.tlb_lo1 & MIPS3_PG_G) ? 'G' : ' ',
			 (tlb.tlb_lo1 >> 3) & 7, tlb.tlb_mask);
	}
	write_c0_entryhi(old_entryhi);
	write_c0_pagemask(old_pagemask);
	mtc0_tlbw_hazard();
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(kvm_mips_dump_host_tlbs);

void kvm_mips_dump_guest_tlbs(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_mips_tlb tlb;
	int i;

	kvm_info("Guest TLBs:\n");
	kvm_info("Guest EntryHi: %#lx\n", kvm_read_c0_guest_entryhi(cop0));

	for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
		tlb = vcpu->arch.guest_tlb[i];
		kvm_info("TLB%c%3d Hi 0x%08lx ",
			 (tlb.tlb_lo0 | tlb.tlb_lo1) & MIPS3_PG_V ? ' ' : '*',
			 i, tlb.tlb_hi);
		kvm_info("Lo0=0x%09" PRIx64 " %c%c attr %lx ",
			 (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo0),
			 (tlb.tlb_lo0 & MIPS3_PG_D) ? 'D' : ' ',
			 (tlb.tlb_lo0 & MIPS3_PG_G) ? 'G' : ' ',
			 (tlb.tlb_lo0 >> 3) & 7);
		kvm_info("Lo1=0x%09" PRIx64 " %c%c attr %lx sz=%lx\n",
			 (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo1),
			 (tlb.tlb_lo1 & MIPS3_PG_D) ? 'D' : ' ',
			 (tlb.tlb_lo1 & MIPS3_PG_G) ? 'G' : ' ',
			 (tlb.tlb_lo1 >> 3) & 7, tlb.tlb_mask);
	}
}
EXPORT_SYMBOL_GPL(kvm_mips_dump_guest_tlbs);

static int kvm_mips_map_page(struct kvm *kvm, gfn_t gfn)
{
	int srcu_idx, err = 0;
	kvm_pfn_t pfn;

	if (kvm->arch.guest_pmap[gfn] != KVM_INVALID_PAGE)
		return 0;

	srcu_idx = srcu_read_lock(&kvm->srcu);
	pfn = kvm_mips_gfn_to_pfn(kvm, gfn);

	if (kvm_mips_is_error_pfn(pfn)) {
		kvm_err("Couldn't get pfn for gfn %#" PRIx64 "!\n", gfn);
		err = -EFAULT;
		goto out;
	}

	kvm->arch.guest_pmap[gfn] = pfn;
out:
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	return err;
}
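
/*
 * kvm_mips_map_page() fills kvm->arch.guest_pmap[] lazily: the first fault
 * on a guest frame pins a host page via kvm_mips_gfn_to_pfn() and caches the
 * pfn; subsequent lookups hit the cached entry. Returns 0 on success or
 * -EFAULT if no host page could be obtained.
 */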

/* Translate guest KSEG0 addresses to Host PA */
unsigned long kvm_mips_translate_guest_kseg0_to_hpa(struct kvm_vcpu *vcpu,
						    unsigned long gva)
{
	gfn_t gfn;
	uint32_t offset = gva & ~PAGE_MASK;
	struct kvm *kvm = vcpu->kvm;

	if (KVM_GUEST_KSEGX(gva) != KVM_GUEST_KSEG0) {
		kvm_err("%s/%p: Invalid gva: %#lx\n", __func__,
			__builtin_return_address(0), gva);
		return KVM_INVALID_PAGE;
	}

	gfn = (KVM_GUEST_CPHYSADDR(gva) >> PAGE_SHIFT);

	if (gfn >= kvm->arch.guest_pmap_npages) {
		kvm_err("%s: Invalid gfn: %#llx, GVA: %#lx\n", __func__, gfn,
			gva);
		return KVM_INVALID_PAGE;
	}

	if (kvm_mips_map_page(vcpu->kvm, gfn) < 0)
		return KVM_INVALID_ADDR;

	return (kvm->arch.guest_pmap[gfn] << PAGE_SHIFT) + offset;
}
EXPORT_SYMBOL_GPL(kvm_mips_translate_guest_kseg0_to_hpa);
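
/*
 * Worked example (illustrative only; assumes 4K pages, i.e. PAGE_SHIFT == 12,
 * and the usual trap-and-emulate layout with guest KSEG0 at KVM_GUEST_KSEG0):
 *
 *	gva = KVM_GUEST_KSEG0 + 0x5234
 *	KVM_GUEST_CPHYSADDR(gva) = 0x5234	(segment bits stripped)
 *	gfn = 0x5234 >> 12 = 5, offset = 0x234
 *	hpa = (guest_pmap[5] << 12) + 0x234
 */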

/* XXXKYMA: Must be called with interrupts disabled */
/* set flush_dcache_mask == 0 if no dcache flush required */
int kvm_mips_host_tlb_write(struct kvm_vcpu *vcpu, unsigned long entryhi,
			    unsigned long entrylo0, unsigned long entrylo1,
			    int flush_dcache_mask)
{
	unsigned long flags;
	unsigned long old_entryhi;
	int idx;

	local_irq_save(flags);

	old_entryhi = read_c0_entryhi();
	write_c0_entryhi(entryhi);
	mtc0_tlbw_hazard();

	tlb_probe();
	tlb_probe_hazard();
	idx = read_c0_index();

	/* Valid indices are 0..tlbsize-1; the probe returns < 0 on a miss */
	if (idx >= current_cpu_data.tlbsize) {
		kvm_err("%s: Invalid Index: %d\n", __func__, idx);
		kvm_mips_dump_host_tlbs();
		local_irq_restore(flags);
		return -1;
	}

	write_c0_entrylo0(entrylo0);
	write_c0_entrylo1(entrylo1);
	mtc0_tlbw_hazard();

	if (idx < 0)
		tlb_write_random();
	else
		tlb_write_indexed();
	tlbw_use_hazard();

	kvm_debug("@ %#lx idx: %2d [entryhi(R): %#lx] entrylo0(R): 0x%08lx, entrylo1(R): 0x%08lx\n",
		  vcpu->arch.pc, idx, read_c0_entryhi(),
		  read_c0_entrylo0(), read_c0_entrylo1());

	/* Flush D-cache */
	if (flush_dcache_mask) {
		if (entrylo0 & MIPS3_PG_V) {
			++vcpu->stat.flush_dcache_exits;
			flush_data_cache_page((entryhi & VPN2_MASK) &
					      ~flush_dcache_mask);
		}
		if (entrylo1 & MIPS3_PG_V) {
			++vcpu->stat.flush_dcache_exits;
			flush_data_cache_page(((entryhi & VPN2_MASK) &
					       ~flush_dcache_mask) |
					      (0x1 << PAGE_SHIFT));
		}
	}

	/* Restore old ASID */
	write_c0_entryhi(old_entryhi);
	mtc0_tlbw_hazard();
	tlbw_use_hazard();
	local_irq_restore(flags);
	return 0;
}
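
/*
 * Usage sketch (hypothetical caller, mirroring the fault handlers below):
 * build an even/odd EntryLo pair for an aligned double-page, then write it
 * under the guest kernel ASID; all names shown already exist in this file.
 *
 *	entryhi = (vaddr & (PAGE_MASK << 1)) | kvm_mips_get_kernel_asid(vcpu);
 *	kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1, 0);
 */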

/* XXXKYMA: Must be called with interrupts disabled */
int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr,
				    struct kvm_vcpu *vcpu)
{
	gfn_t gfn;
	kvm_pfn_t pfn0, pfn1;
	unsigned long vaddr = 0;
	unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0;
	int even;
	struct kvm *kvm = vcpu->kvm;
	const int flush_dcache_mask = 0;
	int ret;

	if (KVM_GUEST_KSEGX(badvaddr) != KVM_GUEST_KSEG0) {
		kvm_err("%s: Invalid BadVaddr: %#lx\n", __func__, badvaddr);
		kvm_mips_dump_host_tlbs();
		return -1;
	}

	gfn = (KVM_GUEST_CPHYSADDR(badvaddr) >> PAGE_SHIFT);
	if (gfn >= kvm->arch.guest_pmap_npages) {
		kvm_err("%s: Invalid gfn: %#llx, BadVaddr: %#lx\n", __func__,
			gfn, badvaddr);
		kvm_mips_dump_host_tlbs();
		return -1;
	}
	even = !(gfn & 0x1);
	vaddr = badvaddr & (PAGE_MASK << 1);

	if (kvm_mips_map_page(vcpu->kvm, gfn) < 0)
		return -1;

	if (kvm_mips_map_page(vcpu->kvm, gfn ^ 0x1) < 0)
		return -1;

	if (even) {
		pfn0 = kvm->arch.guest_pmap[gfn];
		pfn1 = kvm->arch.guest_pmap[gfn ^ 0x1];
	} else {
		pfn0 = kvm->arch.guest_pmap[gfn ^ 0x1];
		pfn1 = kvm->arch.guest_pmap[gfn];
	}

	entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) |
		   (1 << 2) | (0x1 << 1);
	entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | (0x3 << 3) |
		   (1 << 2) | (0x1 << 1);

	preempt_disable();
	entryhi = (vaddr | kvm_mips_get_kernel_asid(vcpu));
	ret = kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1,
				      flush_dcache_mask);
	preempt_enable();

	return ret;
}
EXPORT_SYMBOL_GPL(kvm_mips_handle_kseg0_tlb_fault);
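
/*
 * Each MIPS TLB entry maps an aligned even/odd page pair, so the handler
 * above always fills both halves. Illustrative example, assuming 4K pages:
 * a badvaddr in gfn 5 (odd) gives vaddr aligned down to 8K, pfn0 from
 * guest_pmap[4] and pfn1 from guest_pmap[5]. In the EntryLo words,
 * (0x3 << 3) selects cache coherency attribute 3 (cacheable), (1 << 2) is
 * the dirty bit (MIPS3_PG_D) and (0x1 << 1) the valid bit (MIPS3_PG_V).
 */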

int kvm_mips_handle_commpage_tlb_fault(unsigned long badvaddr,
	struct kvm_vcpu *vcpu)
{
	kvm_pfn_t pfn0, pfn1;
	unsigned long flags, old_entryhi = 0, vaddr = 0;
	unsigned long entrylo0 = 0, entrylo1 = 0;

	pfn0 = CPHYSADDR(vcpu->arch.kseg0_commpage) >> PAGE_SHIFT;
	pfn1 = 0;
	entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) |
		   (1 << 2) | (0x1 << 1);
	entrylo1 = 0;

	local_irq_save(flags);

	old_entryhi = read_c0_entryhi();
	vaddr = badvaddr & (PAGE_MASK << 1);
	write_c0_entryhi(vaddr | kvm_mips_get_kernel_asid(vcpu));
	mtc0_tlbw_hazard();
	write_c0_entrylo0(entrylo0);
	mtc0_tlbw_hazard();
	write_c0_entrylo1(entrylo1);
	mtc0_tlbw_hazard();
	write_c0_index(kvm_mips_get_commpage_asid(vcpu));
	mtc0_tlbw_hazard();
	tlb_write_indexed();
	mtc0_tlbw_hazard();
	tlbw_use_hazard();

	kvm_debug("@ %#lx idx: %2d [entryhi(R): %#lx] entrylo0(R): 0x%08lx, entrylo1(R): 0x%08lx\n",
		  vcpu->arch.pc, read_c0_index(), read_c0_entryhi(),
		  read_c0_entrylo0(), read_c0_entrylo1());

	/* Restore old ASID */
	write_c0_entryhi(old_entryhi);
	mtc0_tlbw_hazard();
	tlbw_use_hazard();
	local_irq_restore(flags);

	return 0;
}
EXPORT_SYMBOL_GPL(kvm_mips_handle_commpage_tlb_fault);
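
/*
 * Note: despite its name, kvm_mips_get_commpage_asid() returns the reserved
 * TLB slot for the commpage (kvm->arch.commpage_tlb); it is used with
 * write_c0_index() above as a TLB index, not as an ASID.
 */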

int kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
					 struct kvm_mips_tlb *tlb,
					 unsigned long *hpa0,
					 unsigned long *hpa1)
{
	unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0;
	struct kvm *kvm = vcpu->kvm;
	kvm_pfn_t pfn0, pfn1;
	int ret;

	if ((tlb->tlb_hi & VPN2_MASK) == 0) {
		pfn0 = 0;
		pfn1 = 0;
	} else {
		if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo0)
					   >> PAGE_SHIFT) < 0)
			return -1;

		if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo1)
					   >> PAGE_SHIFT) < 0)
			return -1;

		pfn0 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo0)
					    >> PAGE_SHIFT];
		pfn1 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo1)
					    >> PAGE_SHIFT];
	}

	if (hpa0)
		*hpa0 = pfn0 << PAGE_SHIFT;

	if (hpa1)
		*hpa1 = pfn1 << PAGE_SHIFT;

	/* Get attributes from the Guest TLB */
	entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) |
		   (tlb->tlb_lo0 & MIPS3_PG_D) | (tlb->tlb_lo0 & MIPS3_PG_V);
	entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | (0x3 << 3) |
		   (tlb->tlb_lo1 & MIPS3_PG_D) | (tlb->tlb_lo1 & MIPS3_PG_V);

	kvm_debug("@ %#lx tlb_lo0: 0x%08lx tlb_lo1: 0x%08lx\n", vcpu->arch.pc,
		  tlb->tlb_lo0, tlb->tlb_lo1);

	preempt_disable();
	entryhi = (tlb->tlb_hi & VPN2_MASK) | (KVM_GUEST_KERNEL_MODE(vcpu) ?
					       kvm_mips_get_kernel_asid(vcpu) :
					       kvm_mips_get_user_asid(vcpu));
	ret = kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1,
				      tlb->tlb_mask);
	preempt_enable();

	return ret;
}
EXPORT_SYMBOL_GPL(kvm_mips_handle_mapped_seg_tlb_fault);
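
/*
 * hpa0/hpa1 are optional out-parameters for the host physical addresses of
 * the page pair; kvm_get_inst() below passes NULL for both when it only
 * needs the host TLB refilled before re-reading the faulting instruction.
 */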

int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long entryhi)
{
	int i;
	int index = -1;
	struct kvm_mips_tlb *tlb = vcpu->arch.guest_tlb;

	for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
		if (TLB_HI_VPN2_HIT(tlb[i], entryhi) &&
		    TLB_HI_ASID_HIT(tlb[i], entryhi)) {
			index = i;
			break;
		}
	}

	/* Only dereference the array on a hit; on a miss i == tlbsize */
	if (index >= 0)
		kvm_debug("%s: entryhi: %#lx, index: %d lo0: %#lx, lo1: %#lx\n",
			  __func__, entryhi, index,
			  tlb[index].tlb_lo0, tlb[index].tlb_lo1);

	return index;
}
EXPORT_SYMBOL_GPL(kvm_mips_guest_tlb_lookup);
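
/*
 * Typical lookup key, as built in kvm_get_inst() below: the faulting VPN2
 * combined with the guest's current ASID from its CP0_EntryHi:
 *
 *	index = kvm_mips_guest_tlb_lookup(vcpu, (gva & VPN2_MASK) |
 *			(kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID));
 */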

int kvm_mips_host_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long vaddr)
{
	unsigned long old_entryhi, flags;
	int idx;

	local_irq_save(flags);

	old_entryhi = read_c0_entryhi();

	if (KVM_GUEST_KERNEL_MODE(vcpu))
		write_c0_entryhi((vaddr & VPN2_MASK) |
				 kvm_mips_get_kernel_asid(vcpu));
	else
		write_c0_entryhi((vaddr & VPN2_MASK) |
				 kvm_mips_get_user_asid(vcpu));

	mtc0_tlbw_hazard();

	tlb_probe();
	tlb_probe_hazard();
	idx = read_c0_index();

	/* Restore old ASID */
	write_c0_entryhi(old_entryhi);
	mtc0_tlbw_hazard();
	tlbw_use_hazard();

	local_irq_restore(flags);

	kvm_debug("Host TLB lookup, %#lx, idx: %2d\n", vaddr, idx);

	return idx;
}
EXPORT_SYMBOL_GPL(kvm_mips_host_tlb_lookup);

int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long va)
{
	int idx;
	unsigned long flags, old_entryhi;

	local_irq_save(flags);

	old_entryhi = read_c0_entryhi();

	write_c0_entryhi((va & VPN2_MASK) | kvm_mips_get_user_asid(vcpu));
	mtc0_tlbw_hazard();

	tlb_probe();
	tlb_probe_hazard();
	idx = read_c0_index();

	if (idx >= current_cpu_data.tlbsize)
		BUG();

	/* Index 0 is a valid entry, so invalidate on any non-negative index */
	if (idx >= 0) {
		write_c0_entryhi(UNIQUE_ENTRYHI(idx));
		mtc0_tlbw_hazard();

		write_c0_entrylo0(0);
		mtc0_tlbw_hazard();

		write_c0_entrylo1(0);
		mtc0_tlbw_hazard();

		tlb_write_indexed();
		mtc0_tlbw_hazard();
	}

	write_c0_entryhi(old_entryhi);
	mtc0_tlbw_hazard();
	tlbw_use_hazard();

	local_irq_restore(flags);

	if (idx >= 0)
		kvm_debug("%s: Invalidated entryhi %#lx @ idx %d\n", __func__,
			  (va & VPN2_MASK) | kvm_mips_get_user_asid(vcpu), idx);

	return 0;
}
EXPORT_SYMBOL_GPL(kvm_mips_host_tlb_inv);
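
/*
 * On a tlb_probe() miss the probe-failure bit (bit 31) of CP0_Index is set,
 * so read_c0_index() yields a negative value; that is why the helpers above
 * treat idx < 0 as "not present" and idx >= 0 as a valid slot.
 */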

void kvm_mips_flush_host_tlb(int skip_kseg0)
{
	unsigned long flags;
	unsigned long old_entryhi, entryhi;
	unsigned long old_pagemask;
	int entry = 0;
	int maxentry = current_cpu_data.tlbsize;

	local_irq_save(flags);

	old_entryhi = read_c0_entryhi();
	old_pagemask = read_c0_pagemask();

	/* Blast 'em all away. */
	for (entry = 0; entry < maxentry; entry++) {
		write_c0_index(entry);
		mtc0_tlbw_hazard();

		if (skip_kseg0) {
			tlb_read();
			tlbw_use_hazard();

			entryhi = read_c0_entryhi();

			/* Don't blow away guest kernel entries */
			if (KVM_GUEST_KSEGX(entryhi) == KVM_GUEST_KSEG0)
				continue;
		}

		/* Make sure all entries differ. */
		write_c0_entryhi(UNIQUE_ENTRYHI(entry));
		mtc0_tlbw_hazard();
		write_c0_entrylo0(0);
		mtc0_tlbw_hazard();
		write_c0_entrylo1(0);
		mtc0_tlbw_hazard();

		tlb_write_indexed();
		mtc0_tlbw_hazard();
	}

	tlbw_use_hazard();

	write_c0_entryhi(old_entryhi);
	write_c0_pagemask(old_pagemask);
	mtc0_tlbw_hazard();
	tlbw_use_hazard();

	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(kvm_mips_flush_host_tlb);

void kvm_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu,
			     struct kvm_vcpu *vcpu)
{
	unsigned long asid = asid_cache(cpu);

	asid += cpu_asid_inc();
	if (!(asid & cpu_asid_mask(&cpu_data[cpu]))) {
		if (cpu_has_vtag_icache)
			flush_icache_all();

		kvm_local_flush_tlb_all();      /* start new asid cycle */

		if (!asid)      /* fix version if needed */
			asid = asid_first_version(cpu);
	}

	cpu_context(cpu, mm) = asid_cache(cpu) = asid;
}
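
/*
 * Illustrative ASID arithmetic (values assume an 8-bit ASID field, i.e.
 * cpu_asid_mask() == 0xff): asid_cache(cpu) keeps version bits above the
 * hardware ASID. If the cache holds 0x1ff, the increment wraps the low byte
 * to zero, so the TLB is flushed and the new cycle continues at 0x200;
 * stale translations tagged with ASIDs from the old cycle can then no
 * longer collide with freshly allocated ones.
 */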

void kvm_local_flush_tlb_all(void)
{
	unsigned long flags;
	unsigned long old_ctx;
	int entry = 0;

	local_irq_save(flags);
	/* Save old context and create impossible VPN2 value */
	old_ctx = read_c0_entryhi();
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);

	/* Blast 'em all away. */
	while (entry < current_cpu_data.tlbsize) {
		/* Make sure all entries differ. */
		write_c0_entryhi(UNIQUE_ENTRYHI(entry));
		write_c0_index(entry);
		mtc0_tlbw_hazard();
		tlb_write_indexed();
		entry++;
	}
	tlbw_use_hazard();
	write_c0_entryhi(old_ctx);
	mtc0_tlbw_hazard();

	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(kvm_local_flush_tlb_all);

/**
 * kvm_mips_migrate_count() - Migrate timer.
 * @vcpu:	Virtual CPU.
 *
 * Migrate CP0_Count hrtimer to the current CPU by cancelling and restarting it
 * if it was running prior to being cancelled.
 *
 * Must be called when the VCPU is migrated to a different CPU to ensure that
 * timer expiry during guest execution interrupts the guest and causes the
 * interrupt to be delivered in a timely manner.
 */
static void kvm_mips_migrate_count(struct kvm_vcpu *vcpu)
{
	if (hrtimer_cancel(&vcpu->arch.comparecount_timer))
		hrtimer_restart(&vcpu->arch.comparecount_timer);
}

/* Restore ASID once we are scheduled back after preemption */
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	unsigned long asid_mask = cpu_asid_mask(&cpu_data[cpu]);
	unsigned long flags;
	int newasid = 0;

	kvm_debug("%s: vcpu %p, cpu: %d\n", __func__, vcpu, cpu);

	/* Allocate new kernel and user ASIDs if needed */

	local_irq_save(flags);

	if ((vcpu->arch.guest_kernel_asid[cpu] ^ asid_cache(cpu)) &
						asid_version_mask(cpu)) {
		kvm_get_new_mmu_context(&vcpu->arch.guest_kernel_mm, cpu, vcpu);
		vcpu->arch.guest_kernel_asid[cpu] =
		    vcpu->arch.guest_kernel_mm.context.asid[cpu];
		kvm_get_new_mmu_context(&vcpu->arch.guest_user_mm, cpu, vcpu);
		vcpu->arch.guest_user_asid[cpu] =
		    vcpu->arch.guest_user_mm.context.asid[cpu];
		newasid++;

		kvm_debug("[%d]: cpu_context: %#lx\n", cpu,
			  cpu_context(cpu, current->mm));
		kvm_debug("[%d]: Allocated new ASID for Guest Kernel: %#x\n",
			  cpu, vcpu->arch.guest_kernel_asid[cpu]);
		kvm_debug("[%d]: Allocated new ASID for Guest User: %#x\n", cpu,
			  vcpu->arch.guest_user_asid[cpu]);
	}

	if (vcpu->arch.last_sched_cpu != cpu) {
		kvm_debug("[%d->%d]KVM VCPU[%d] switch\n",
			  vcpu->arch.last_sched_cpu, cpu, vcpu->vcpu_id);
		/*
		 * Migrate the timer interrupt to the current CPU so that it
		 * always interrupts the guest and synchronously triggers a
		 * guest timer interrupt.
		 */
		kvm_mips_migrate_count(vcpu);
	}

	if (!newasid) {
		/*
		 * If we were preempted while the guest was executing, then
		 * reload the pre-empted ASID.
		 */
		if (current->flags & PF_VCPU) {
			write_c0_entryhi(vcpu->arch.preempt_entryhi &
					 asid_mask);
			ehb();
		}
	} else {
		/* New ASIDs were allocated for the VM */

		/*
		 * Were we in guest context? If so, then the pre-empted ASID is
		 * no longer valid and we need to set it to what it should be
		 * based on the mode of the Guest (Kernel/User).
		 */
		if (current->flags & PF_VCPU) {
			if (KVM_GUEST_KERNEL_MODE(vcpu))
				write_c0_entryhi(vcpu->arch.guest_kernel_asid[cpu] &
						 asid_mask);
			else
				write_c0_entryhi(vcpu->arch.guest_user_asid[cpu] &
						 asid_mask);
			ehb();
		}
	}

	/* restore guest state to registers */
	kvm_mips_callbacks->vcpu_set_regs(vcpu);

	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(kvm_arch_vcpu_load);

/* ASID can change if another task is scheduled during preemption */
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	unsigned long flags;
	uint32_t cpu;

	local_irq_save(flags);

	cpu = smp_processor_id();

	vcpu->arch.preempt_entryhi = read_c0_entryhi();
	vcpu->arch.last_sched_cpu = cpu;

	/* save guest state in registers */
	kvm_mips_callbacks->vcpu_get_regs(vcpu);

	if (((cpu_context(cpu, current->mm) ^ asid_cache(cpu)) &
	     asid_version_mask(cpu))) {
		kvm_debug("%s: Dropping MMU Context:  %#lx\n", __func__,
			  cpu_context(cpu, current->mm));
		drop_mmu_context(current->mm, cpu);
	}
	write_c0_entryhi(cpu_asid(cpu, current->mm));
	ehb();

	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(kvm_arch_vcpu_put);

uint32_t kvm_get_inst(uint32_t *opc, struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	unsigned long paddr, flags, vpn2, asid;
	uint32_t inst;
	int index;

	if (KVM_GUEST_KSEGX((unsigned long) opc) < KVM_GUEST_KSEG0 ||
	    KVM_GUEST_KSEGX((unsigned long) opc) == KVM_GUEST_KSEG23) {
		local_irq_save(flags);
		index = kvm_mips_host_tlb_lookup(vcpu, (unsigned long) opc);
		if (index >= 0) {
			inst = *(opc);
		} else {
			vpn2 = (unsigned long) opc & VPN2_MASK;
			asid = kvm_read_c0_guest_entryhi(cop0) &
						KVM_ENTRYHI_ASID;
			index = kvm_mips_guest_tlb_lookup(vcpu, vpn2 | asid);
			if (index < 0) {
				kvm_err("%s: get_user_failed for %p, vcpu: %p, ASID: %#lx\n",
					__func__, opc, vcpu, read_c0_entryhi());
				kvm_mips_dump_host_tlbs();
				local_irq_restore(flags);
				return KVM_INVALID_INST;
			}
			kvm_mips_handle_mapped_seg_tlb_fault(vcpu,
						&vcpu->arch.guest_tlb[index],
						NULL, NULL);
			inst = *(opc);
		}
		local_irq_restore(flags);
	} else if (KVM_GUEST_KSEGX((unsigned long) opc) == KVM_GUEST_KSEG0) {
		paddr = kvm_mips_translate_guest_kseg0_to_hpa(vcpu,
							(unsigned long) opc);
		inst = *(uint32_t *) CKSEG0ADDR(paddr);
	} else {
		kvm_err("%s: illegal address: %p\n", __func__, opc);
		return KVM_INVALID_INST;
	}

	return inst;
}
EXPORT_SYMBOL_GPL(kvm_get_inst);
799