/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS TLB handling: this file is part of the Linux host kernel so that
 * TLB handlers run from KSEG0
 *
 * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */

#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/kvm_host.h>
#include <linux/srcu.h>

#include <asm/cpu.h>
#include <asm/bootinfo.h>
#include <asm/mipsregs.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
#include <asm/tlb.h>
#include <asm/tlbdebug.h>

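/*
 * Temporarily mask CONFIG_MIPS_MT around this include so that r4kcache.h
 * provides its plain (non-MT) cache op variants here; this appears to be the
 * intent of the undef/define pair below.
 */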
#undef CONFIG_MIPS_MT
#include <asm/r4kcache.h>
#define CONFIG_MIPS_MT

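/*
 * GuestID allocation state shared with the VZ implementation: the mask of
 * implemented GuestID bits and the derived version bookkeeping, set up when
 * VZ hardware support is enabled.
 */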
unsigned long GUESTID_MASK;
EXPORT_SYMBOL_GPL(GUESTID_MASK);
unsigned long GUESTID_FIRST_VERSION;
EXPORT_SYMBOL_GPL(GUESTID_FIRST_VERSION);
unsigned long GUESTID_VERSION_MASK;
EXPORT_SYMBOL_GPL(GUESTID_VERSION_MASK);

static u32 kvm_mips_get_root_asid(struct kvm_vcpu *vcpu)
{
	struct mm_struct *gpa_mm = &vcpu->kvm->arch.gpa_mm;

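	/*
	 * With GuestID support, GPA mappings in the root TLB are tagged by
	 * GuestID (RID) rather than by ASID, so the ASID field is unused.
	 */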
	if (cpu_has_guestid)
		return 0;
	else
		return cpu_asid(smp_processor_id(), gpa_mm);
}

static int _kvm_mips_host_tlb_inv(unsigned long entryhi)
{
	int idx;

	write_c0_entryhi(entryhi);
	mtc0_tlbw_hazard();

	tlb_probe();
	tlb_probe_hazard();
	idx = read_c0_index();

	if (idx >= current_cpu_data.tlbsize)
		BUG();

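	/*
	 * On a probe hit, overwrite the matching entry with a unique,
	 * never-matching EntryHi and invalid EntryLo pair to drop it.
	 */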
	if (idx >= 0) {
		write_c0_entryhi(UNIQUE_ENTRYHI(idx));
		write_c0_entrylo0(0);
		write_c0_entrylo1(0);
		mtc0_tlbw_hazard();

		tlb_write_indexed();
		tlbw_use_hazard();
	}

	return idx;
}

/* GuestID management */

/**
 * clear_root_gid() - Set GuestCtl1.RID for normal root operation.
 */
static inline void clear_root_gid(void)
{
	if (cpu_has_guestid) {
		clear_c0_guestctl1(MIPS_GCTL1_RID);
		mtc0_tlbw_hazard();
	}
}

/**
 * set_root_gid_to_guest_gid() - Set GuestCtl1.RID to match GuestCtl1.ID.
 *
 * Sets the root GuestID to match the current guest GuestID, for TLB operation
 * on the GPA->RPA mappings in the root TLB.
 *
 * The caller must be sure to disable HTW while the root GID is set, and
 * possibly longer if TLB registers are modified.
 */
static inline void set_root_gid_to_guest_gid(void)
{
	unsigned int guestctl1;

	if (cpu_has_guestid) {
		back_to_back_c0_hazard();
		guestctl1 = read_c0_guestctl1();
		guestctl1 = (guestctl1 & ~MIPS_GCTL1_RID) |
			((guestctl1 & MIPS_GCTL1_ID) >> MIPS_GCTL1_ID_SHIFT)
						     << MIPS_GCTL1_RID_SHIFT;
		write_c0_guestctl1(guestctl1);
		mtc0_tlbw_hazard();
	}
}

int kvm_vz_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long va)
{
	int idx;
	unsigned long flags, old_entryhi;

	local_irq_save(flags);
	htw_stop();

	/* Set root GuestID for root probe and write of guest TLB entry */
	set_root_gid_to_guest_gid();

	old_entryhi = read_c0_entryhi();

	idx = _kvm_mips_host_tlb_inv((va & VPN2_MASK) |
				     kvm_mips_get_root_asid(vcpu));

	write_c0_entryhi(old_entryhi);
	clear_root_gid();
	mtc0_tlbw_hazard();

	htw_start();
	local_irq_restore(flags);

	/*
	 * We don't want to get reserved instruction exceptions for missing tlb
	 * entries.
	 */
	if (cpu_has_vtag_icache)
		flush_icache_all();

	if (idx > 0)
		kvm_debug("%s: Invalidated root entryhi %#lx @ idx %d\n",
			  __func__, (va & VPN2_MASK) |
				    kvm_mips_get_root_asid(vcpu), idx);

	return 0;
}
EXPORT_SYMBOL_GPL(kvm_vz_host_tlb_inv);

/**
 * kvm_vz_guest_tlb_lookup() - Lookup a guest VZ TLB mapping.
 * @vcpu:	KVM VCPU pointer.
 * @gva:	Guest virtual address in a TLB mapped guest segment.
 * @gpa:	Pointer to output guest physical address it maps to.
 *
 * Converts a guest virtual address in a guest TLB mapped segment to a guest
 * physical address, by probing the guest TLB.
 *
 * Returns:	0 if guest TLB mapping exists for @gva. *@gpa will have been
 *		written.
 *		-EFAULT if no guest TLB mapping exists for @gva. *@gpa may not
 *		have been written.
 */
int kvm_vz_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long gva,
			    unsigned long *gpa)
{
	unsigned long o_entryhi, o_entrylo[2], o_pagemask;
	unsigned int o_index;
	unsigned long entrylo[2], pagemask, pagemaskbit, pa;
	unsigned long flags;
	int index;

	/* Probe the guest TLB for a mapping */
	local_irq_save(flags);
	/* Set root GuestID for root probe of guest TLB entry */
	htw_stop();
	set_root_gid_to_guest_gid();

	o_entryhi = read_gc0_entryhi();
	o_index = read_gc0_index();

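	/*
	 * Compose a probe EntryHi from the guest's current ASID (low bits of
	 * the saved EntryHi) and the VPN bits of gva (4K page granularity).
	 */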
	write_gc0_entryhi((o_entryhi & 0x3ff) | (gva & ~0xfffl));
	mtc0_tlbw_hazard();
	guest_tlb_probe();
	tlb_probe_hazard();

	index = read_gc0_index();
	if (index < 0) {
		/* No match, fail */
		write_gc0_entryhi(o_entryhi);
		write_gc0_index(o_index);

		clear_root_gid();
		htw_start();
		local_irq_restore(flags);
		return -EFAULT;
	}

	/* Match! read the TLB entry */
	o_entrylo[0] = read_gc0_entrylo0();
	o_entrylo[1] = read_gc0_entrylo1();
	o_pagemask = read_gc0_pagemask();

	mtc0_tlbr_hazard();
	guest_tlb_read();
	tlb_read_hazard();

	entrylo[0] = read_gc0_entrylo0();
	entrylo[1] = read_gc0_entrylo1();
	pagemask = ~read_gc0_pagemask() & ~0x1fffl;

	write_gc0_entryhi(o_entryhi);
	write_gc0_index(o_index);
	write_gc0_entrylo0(o_entrylo[0]);
	write_gc0_entrylo1(o_entrylo[1]);
	write_gc0_pagemask(o_pagemask);

	clear_root_gid();
	htw_start();
	local_irq_restore(flags);

	/* Select one of the EntryLo values and interpret the GPA */
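	/*
	 * pagemask now has bits set for the VPN of this (possibly large) page
	 * pair; isolating its lowest set bit and shifting it down once gives
	 * the address bit that selects the even or odd page of the pair.
	 */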
	pagemaskbit = (pagemask ^ (pagemask & (pagemask - 1))) >> 1;
	pa = entrylo[!!(gva & pagemaskbit)];

	/*
	 * TLB entry may have become invalid since TLB probe if physical FTLB
	 * entries are shared between threads (e.g. I6400).
	 */
	if (!(pa & ENTRYLO_V))
		return -EFAULT;

	/*
	 * Note, this doesn't take guest MIPS32 XPA into account, where PFN is
	 * split with XI/RI in the middle.
	 */
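	/*
	 * EntryLo holds the PFN starting at bit 6 and PA = PFN << 12, so
	 * shifting left by 6 lines the PFN up with bit 12; the flag bits
	 * shifted into 11:6 are masked off, then the in-page offset bits of
	 * gva are merged in.
	 */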
	pa = (pa << 6) & ~0xfffl;
	pa |= gva & ~(pagemask | pagemaskbit);

	*gpa = pa;
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_vz_guest_tlb_lookup);

/**
 * kvm_vz_local_flush_roottlb_all_guests() - Flush all root TLB entries for
 * guests.
 *
 * Invalidate all entries in root tlb which are GPA mappings.
 */
void kvm_vz_local_flush_roottlb_all_guests(void)
{
	unsigned long flags;
	unsigned long old_entryhi, old_pagemask, old_guestctl1;
	int entry;

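	/*
	 * Only meaningful with GuestID; without it GPA mappings carry root
	 * ASIDs and are invalidated through normal root ASID management
	 * instead.
	 */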
	if (WARN_ON(!cpu_has_guestid))
		return;

	local_irq_save(flags);
	htw_stop();

	/* TLBR may clobber EntryHi.ASID, PageMask, and GuestCtl1.RID */
	old_entryhi = read_c0_entryhi();
	old_pagemask = read_c0_pagemask();
	old_guestctl1 = read_c0_guestctl1();

	/*
	 * Invalidate guest entries in root TLB while leaving root entries
	 * intact when possible.
	 */
	for (entry = 0; entry < current_cpu_data.tlbsize; entry++) {
		write_c0_index(entry);
		mtc0_tlbw_hazard();
		tlb_read();
		tlb_read_hazard();

		/* Don't invalidate non-guest (RVA) mappings in the root TLB */
		if (!(read_c0_guestctl1() & MIPS_GCTL1_RID))
			continue;

		/* Make sure all entries differ. */
		write_c0_entryhi(UNIQUE_ENTRYHI(entry));
		write_c0_entrylo0(0);
		write_c0_entrylo1(0);
		write_c0_guestctl1(0);
		mtc0_tlbw_hazard();
		tlb_write_indexed();
	}

	write_c0_entryhi(old_entryhi);
	write_c0_pagemask(old_pagemask);
	write_c0_guestctl1(old_guestctl1);
	tlbw_use_hazard();

	htw_start();
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(kvm_vz_local_flush_roottlb_all_guests);

/**
 * kvm_vz_local_flush_guesttlb_all() - Flush all guest TLB entries.
 *
 * Invalidate all entries in guest tlb irrespective of guestid.
 */
void kvm_vz_local_flush_guesttlb_all(void)
{
	unsigned long flags;
	unsigned long old_index;
	unsigned long old_entryhi;
	unsigned long old_entrylo[2];
	unsigned long old_pagemask;
	int entry;
	u64 cvmmemctl2 = 0;

	local_irq_save(flags);

	/* Preserve all clobbered guest registers */
	old_index = read_gc0_index();
	old_entryhi = read_gc0_entryhi();
	old_entrylo[0] = read_gc0_entrylo0();
	old_entrylo[1] = read_gc0_entrylo1();
	old_pagemask = read_gc0_pagemask();

	switch (current_cpu_type()) {
	case CPU_CAVIUM_OCTEON3:
		/* Inhibit machine check due to multiple matching TLB entries */
		cvmmemctl2 = read_c0_cvmmemctl2();
		cvmmemctl2 |= CVMMEMCTL2_INHIBITTS;
		write_c0_cvmmemctl2(cvmmemctl2);
		break;
	}

	/* Invalidate guest entries in guest TLB */
	write_gc0_entrylo0(0);
	write_gc0_entrylo1(0);
	write_gc0_pagemask(0);
	for (entry = 0; entry < current_cpu_data.guest.tlbsize; entry++) {
		/* Make sure all entries differ. */
		write_gc0_index(entry);
		write_gc0_entryhi(UNIQUE_GUEST_ENTRYHI(entry));
		mtc0_tlbw_hazard();
		guest_tlb_write_indexed();
	}

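	/* Only Octeon III set INHIBITTS above; clear it again now. */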
	if (cvmmemctl2) {
		cvmmemctl2 &= ~CVMMEMCTL2_INHIBITTS;
		write_c0_cvmmemctl2(cvmmemctl2);
	}

	write_gc0_index(old_index);
	write_gc0_entryhi(old_entryhi);
	write_gc0_entrylo0(old_entrylo[0]);
	write_gc0_entrylo1(old_entrylo[1]);
	write_gc0_pagemask(old_pagemask);
	tlbw_use_hazard();

	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(kvm_vz_local_flush_guesttlb_all);

/**
 * kvm_vz_save_guesttlb() - Save a range of guest TLB entries.
 * @buf:	Buffer to write TLB entries into.
 * @index:	Start index.
 * @count:	Number of entries to save.
 *
 * Save a range of guest TLB entries. The caller must ensure interrupts are
 * disabled.
 */
void kvm_vz_save_guesttlb(struct kvm_mips_tlb *buf, unsigned int index,
			  unsigned int count)
{
	unsigned int end = index + count;
	unsigned long old_entryhi, old_entrylo0, old_entrylo1, old_pagemask;
	unsigned int guestctl1 = 0;
	int old_index, i;

	/* Save registers we're about to clobber */
	old_index = read_gc0_index();
	old_entryhi = read_gc0_entryhi();
	old_entrylo0 = read_gc0_entrylo0();
	old_entrylo1 = read_gc0_entrylo1();
	old_pagemask = read_gc0_pagemask();

	/* Set root GuestID for root probe */
	htw_stop();
	set_root_gid_to_guest_gid();
	if (cpu_has_guestid)
		guestctl1 = read_c0_guestctl1();

	/* Read each entry from guest TLB */
	for (i = index; i < end; ++i, ++buf) {
		write_gc0_index(i);

		mtc0_tlbr_hazard();
		guest_tlb_read();
		tlb_read_hazard();

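		/*
		 * The guest TLB read updates GuestCtl1.RID with the GuestID
		 * of the entry just read; a mismatch with our own GuestID
		 * means the entry is stale or belongs to another guest, so
		 * store a dummy entry instead.
		 */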
		if (cpu_has_guestid &&
		    (read_c0_guestctl1() ^ guestctl1) & MIPS_GCTL1_RID) {
			/* Entry invalid or belongs to another guest */
			buf->tlb_hi = UNIQUE_GUEST_ENTRYHI(i);
			buf->tlb_lo[0] = 0;
			buf->tlb_lo[1] = 0;
			buf->tlb_mask = 0;
		} else {
			/* Entry belongs to the right guest */
			buf->tlb_hi = read_gc0_entryhi();
			buf->tlb_lo[0] = read_gc0_entrylo0();
			buf->tlb_lo[1] = read_gc0_entrylo1();
			buf->tlb_mask = read_gc0_pagemask();
		}
	}

	/* Clear root GuestID again */
	clear_root_gid();
	htw_start();

	/* Restore clobbered registers */
	write_gc0_index(old_index);
	write_gc0_entryhi(old_entryhi);
	write_gc0_entrylo0(old_entrylo0);
	write_gc0_entrylo1(old_entrylo1);
	write_gc0_pagemask(old_pagemask);

	tlbw_use_hazard();
}
EXPORT_SYMBOL_GPL(kvm_vz_save_guesttlb);

/**
 * kvm_vz_load_guesttlb() - Load a range of guest TLB entries.
 * @buf:	Buffer to read TLB entries from.
 * @index:	Start index.
 * @count:	Number of entries to load.
 *
 * Load a range of guest TLB entries. The caller must ensure interrupts are
 * disabled.
 */
void kvm_vz_load_guesttlb(const struct kvm_mips_tlb *buf, unsigned int index,
			  unsigned int count)
{
	unsigned int end = index + count;
	unsigned long old_entryhi, old_entrylo0, old_entrylo1, old_pagemask;
	int old_index, i;

	/* Save registers we're about to clobber */
	old_index = read_gc0_index();
	old_entryhi = read_gc0_entryhi();
	old_entrylo0 = read_gc0_entrylo0();
	old_entrylo1 = read_gc0_entrylo1();
	old_pagemask = read_gc0_pagemask();

	/* Set root GuestID for root probe */
	htw_stop();
	set_root_gid_to_guest_gid();

	/* Write each entry to guest TLB */
	for (i = index; i < end; ++i, ++buf) {
		write_gc0_index(i);
		write_gc0_entryhi(buf->tlb_hi);
		write_gc0_entrylo0(buf->tlb_lo[0]);
		write_gc0_entrylo1(buf->tlb_lo[1]);
		write_gc0_pagemask(buf->tlb_mask);

		mtc0_tlbw_hazard();
		guest_tlb_write_indexed();
	}

	/* Clear root GuestID again */
	clear_root_gid();
	htw_start();

	/* Restore clobbered registers */
	write_gc0_index(old_index);
	write_gc0_entryhi(old_entryhi);
	write_gc0_entrylo0(old_entrylo0);
	write_gc0_entrylo1(old_entrylo1);
	write_gc0_pagemask(old_pagemask);

	tlbw_use_hazard();
}
EXPORT_SYMBOL_GPL(kvm_vz_load_guesttlb);

#ifdef CONFIG_CPU_LOONGSON64
void kvm_loongson_clear_guest_vtlb(void)
{
	int idx = read_gc0_index();

	/* Set root GuestID for root probe and write of guest TLB entry */
	set_root_gid_to_guest_gid();

	write_gc0_index(0);
	guest_tlbinvf();
	write_gc0_index(idx);

	clear_root_gid();
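	/* Also flush the Loongson ITLB/DTLB micro-TLBs via the Diag register */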
	set_c0_diag(LOONGSON_DIAG_ITLB | LOONGSON_DIAG_DTLB);
}
EXPORT_SYMBOL_GPL(kvm_loongson_clear_guest_vtlb);

void kvm_loongson_clear_guest_ftlb(void)
{
	int i;
	int idx = read_gc0_index();

	/* Set root GuestID for root probe and write of guest TLB entry */
	set_root_gid_to_guest_gid();

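	/*
	 * TLBINVF clears the whole VTLB in one go, but only one FTLB set per
	 * invocation, so step the index through each FTLB set (the indices
	 * just past the VTLB).
	 */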
	for (i = current_cpu_data.tlbsizevtlb;
	     i < (current_cpu_data.tlbsizevtlb +
		     current_cpu_data.tlbsizeftlbsets);
	     i++) {
		write_gc0_index(i);
		guest_tlbinvf();
	}
	write_gc0_index(idx);

	clear_root_gid();
	set_c0_diag(LOONGSON_DIAG_ITLB | LOONGSON_DIAG_DTLB);
}
EXPORT_SYMBOL_GPL(kvm_loongson_clear_guest_ftlb);
#endif