/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS: MIPS specific KVM APIs
 *
 * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <linux/bootmem.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>

#include <linux/kvm_host.h>

#include "interrupt.h"
#include "commpage.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

#ifndef VECTORSPACING
#define VECTORSPACING 0x100	/* for EI/VI mode */
#endif

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x)
struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "wait",	  VCPU_STAT(wait_exits),	 KVM_STAT_VCPU },
	{ "cache",	  VCPU_STAT(cache_exits),	 KVM_STAT_VCPU },
	{ "signal",	  VCPU_STAT(signal_exits),	 KVM_STAT_VCPU },
	{ "interrupt",	  VCPU_STAT(int_exits),		 KVM_STAT_VCPU },
	{ "cop_unusable", VCPU_STAT(cop_unusable_exits), KVM_STAT_VCPU },
	{ "tlbmod",	  VCPU_STAT(tlbmod_exits),	 KVM_STAT_VCPU },
	{ "tlbmiss_ld",	  VCPU_STAT(tlbmiss_ld_exits),	 KVM_STAT_VCPU },
	{ "tlbmiss_st",	  VCPU_STAT(tlbmiss_st_exits),	 KVM_STAT_VCPU },
	{ "addrerr_st",	  VCPU_STAT(addrerr_st_exits),	 KVM_STAT_VCPU },
	{ "addrerr_ld",	  VCPU_STAT(addrerr_ld_exits),	 KVM_STAT_VCPU },
	{ "syscall",	  VCPU_STAT(syscall_exits),	 KVM_STAT_VCPU },
	{ "resvd_inst",	  VCPU_STAT(resvd_inst_exits),	 KVM_STAT_VCPU },
	{ "break_inst",	  VCPU_STAT(break_inst_exits),	 KVM_STAT_VCPU },
	{ "flush_dcache", VCPU_STAT(flush_dcache_exits), KVM_STAT_VCPU },
	{NULL}
};
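
/*
 * Note: kvm_stats_debugfs_item entries like these are surfaced by the
 * generic KVM code under debugfs (typically /sys/kernel/debug/kvm/ on a
 * standard configuration), one file per counter.
 */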

static int kvm_mips_reset_vcpu(struct kvm_vcpu *vcpu)
{
	int i;

	for_each_possible_cpu(i) {
		vcpu->arch.guest_kernel_asid[i] = 0;
		vcpu->arch.guest_user_asid[i] = 0;
	}

	return 0;
}

/*
 * XXXKYMA: We are simulating a processor that has the WII bit set in
 * Config7, so we are "runnable" if interrupts are pending
 */
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	return !!(vcpu->arch.pending_exceptions);
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	return 1;
}

int kvm_arch_hardware_enable(void *garbage)
{
	return 0;
}

void kvm_arch_hardware_disable(void *garbage)
{
}

int kvm_arch_hardware_setup(void)
{
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
}

void kvm_arch_check_processor_compat(void *rtn)
{
	*(int *)rtn = 0;
}

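/*
 * A note on why a *wired* entry is used below: wired TLB entries sit below
 * the c0_wired index and are never selected by TLBWR's random replacement,
 * so the commpage mapping installed on this entry cannot be evicted while
 * the guest runs.
 */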
static void kvm_mips_init_tlbs(struct kvm *kvm)
{
	unsigned long wired;

	/*
	 * Add a wired entry to the TLB; it is used to map the commpage to
	 * the guest kernel.
	 */
	wired = read_c0_wired();
	write_c0_wired(wired + 1);
	mtc0_tlbw_hazard();
	kvm->arch.commpage_tlb = wired;

	kvm_debug("[%d] commpage TLB: %d\n", smp_processor_id(),
		  kvm->arch.commpage_tlb);
}

static void kvm_mips_init_vm_percpu(void *arg)
{
	struct kvm *kvm = (struct kvm *)arg;

	kvm_mips_init_tlbs(kvm);
	kvm_mips_callbacks->vm_init(kvm);
}

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	if (atomic_inc_return(&kvm_mips_instance) == 1) {
		kvm_debug("%s: 1st KVM instance, setup host TLB parameters\n",
			  __func__);
		on_each_cpu(kvm_mips_init_vm_percpu, kvm, 1);
	}

	return 0;
}

void kvm_mips_free_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	/* Put the pages we reserved for the guest pmap */
	for (i = 0; i < kvm->arch.guest_pmap_npages; i++) {
		if (kvm->arch.guest_pmap[i] != KVM_INVALID_PAGE)
			kvm_mips_release_pfn_clean(kvm->arch.guest_pmap[i]);
	}
	kfree(kvm->arch.guest_pmap);

	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_arch_vcpu_free(vcpu);
	}

	mutex_lock(&kvm->lock);

	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);

	mutex_unlock(&kvm->lock);
}

void kvm_arch_sync_events(struct kvm *kvm)
{
}

static void kvm_mips_uninit_tlbs(void *arg)
{
	/* Restore wired count */
	write_c0_wired(0);
	mtc0_tlbw_hazard();
	/* Clear out all the TLBs */
	kvm_local_flush_tlb_all();
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_mips_free_vcpus(kvm);

	/* If this is the last instance, restore wired count */
	if (atomic_dec_return(&kvm_mips_instance) == 0) {
		kvm_debug("%s: last KVM instance, restoring TLB parameters\n",
			  __func__);
		on_each_cpu(kvm_mips_uninit_tlbs, NULL, 1);
	}
}

long kvm_arch_dev_ioctl(struct file *filp, unsigned int ioctl,
			unsigned long arg)
{
	return -ENOIOCTLCMD;
}

void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
			   struct kvm_memory_slot *dont)
{
}

int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			    unsigned long npages)
{
	return 0;
}

void kvm_arch_memslots_updated(struct kvm *kvm)
{
}

int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   struct kvm_userspace_memory_region *mem,
				   enum kvm_mr_change change)
{
	return 0;
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
				   struct kvm_userspace_memory_region *mem,
				   const struct kvm_memory_slot *old,
				   enum kvm_mr_change change)
{
	unsigned long npages = 0;
	int i;

	kvm_debug("%s: kvm: %p slot: %d, GPA: %llx, size: %llx, QVA: %llx\n",
		  __func__, kvm, mem->slot, mem->guest_phys_addr,
		  mem->memory_size, mem->userspace_addr);

	/* Set up the guest PMAP table */
	if (!kvm->arch.guest_pmap) {
		if (mem->slot == 0)
			npages = mem->memory_size >> PAGE_SHIFT;

		if (npages) {
			kvm->arch.guest_pmap_npages = npages;
			kvm->arch.guest_pmap =
			    kzalloc(npages * sizeof(unsigned long), GFP_KERNEL);

			if (!kvm->arch.guest_pmap) {
				kvm_err("Failed to allocate guest PMAP\n");
				return;
			}

			kvm_debug("Allocated space for Guest PMAP Table (%ld pages) @ %p\n",
				  npages, kvm->arch.guest_pmap);

			/* Now set up the page table */
			for (i = 0; i < npages; i++)
				kvm->arch.guest_pmap[i] = KVM_INVALID_PAGE;
		}
	}
}
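
/*
 * Worked example of the sizing above (not from the original source): a
 * 128 MiB slot 0 with 4 KiB pages gives npages = 0x8000000 >> 12 = 32768,
 * so the guest_pmap table costs 32768 * sizeof(unsigned long) bytes
 * (256 KiB on a 64-bit host, 128 KiB on a 32-bit one).
 */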

void kvm_arch_flush_shadow_all(struct kvm *kvm)
{
}

void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *slot)
{
}

void kvm_arch_flush_shadow(struct kvm *kvm)
{
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
{
	int err, size, offset;
	void *gebase;
	int i;

	struct kvm_vcpu *vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);

	if (!vcpu) {
		err = -ENOMEM;
		goto out;
	}

	err = kvm_vcpu_init(vcpu, kvm, id);

	if (err)
		goto out_free_cpu;

	kvm_debug("kvm @ %p: create cpu %d at %p\n", kvm, id, vcpu);

	/*
	 * Allocate space for host mode exception handlers that handle
	 * guest mode exits
	 */
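	/*
	 * Sizing note: with vectored interrupts (EIC/VI) the handlers live at
	 * base + 0x200 + vector * VECTORSPACING, and room is made for 64
	 * vectors, hence 0x200 + VECTORSPACING * 64; otherwise a flat 16 KiB
	 * (0x4000) region covers the classic exception vectors.
	 */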
	if (cpu_has_veic || cpu_has_vint)
		size = 0x200 + VECTORSPACING * 64;
	else
		size = 0x4000;

	/* Save Linux EBASE */
	vcpu->arch.host_ebase = (void *)read_c0_ebase();

	gebase = kzalloc(ALIGN(size, PAGE_SIZE), GFP_KERNEL);

	if (!gebase) {
		err = -ENOMEM;
		goto out_free_cpu;
	}
	kvm_debug("Allocated %d bytes for KVM Exception Handlers @ %p\n",
		  ALIGN(size, PAGE_SIZE), gebase);

	/* Save new ebase */
	vcpu->arch.guest_ebase = gebase;

	/* Copy the L1 guest exception handler to the correct offsets */

	/* TLB refill, EXL = 0 */
	memcpy(gebase, mips32_exception,
	       mips32_exceptionEnd - mips32_exception);

	/* General exception entry point */
	memcpy(gebase + 0x180, mips32_exception,
	       mips32_exceptionEnd - mips32_exception);

	/* For vectored interrupts, install the exception code at all 8 vector offsets */
	for (i = 0; i < 8; i++) {
		kvm_debug("L1 Vectored handler @ %p\n",
			  gebase + 0x200 + (i * VECTORSPACING));
		memcpy(gebase + 0x200 + (i * VECTORSPACING), mips32_exception,
		       mips32_exceptionEnd - mips32_exception);
	}

	/* General handler, relocated to unmapped space for sanity's sake */
	offset = 0x2000;
	kvm_debug("Installing KVM Exception handlers @ %p, %#x bytes\n",
		  gebase + offset,
		  mips32_GuestExceptionEnd - mips32_GuestException);

	memcpy(gebase + offset, mips32_GuestException,
	       mips32_GuestExceptionEnd - mips32_GuestException);

	/* Invalidate the icache for these ranges */
	local_flush_icache_range((unsigned long)gebase,
				(unsigned long)gebase + ALIGN(size, PAGE_SIZE));

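	/*
	 * At this point gebase is laid out as follows (offsets relative to
	 * the new EBASE):
	 *   0x000  TLB refill handler (EXL = 0)
	 *   0x180  general exception entry
	 *   0x200+ vectored interrupt entries, VECTORSPACING apart
	 *   0x2000 the real guest-exit handler (mips32_GuestException)
	 */
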
	/*
	 * Allocate the comm page for the guest kernel; a wired TLB entry will
	 * be reserved to map GVA 0xFFFF8000 to this page.
	 */
	vcpu->arch.kseg0_commpage = kzalloc(PAGE_SIZE << 1, GFP_KERNEL);

	if (!vcpu->arch.kseg0_commpage) {
		err = -ENOMEM;
		goto out_free_gebase;
	}

	kvm_debug("Allocated COMM page @ %p\n", vcpu->arch.kseg0_commpage);
	kvm_mips_commpage_init(vcpu);

	/* Init */
	vcpu->arch.last_sched_cpu = -1;

	/* Start off the timer */
	kvm_mips_init_count(vcpu);

	return vcpu;

out_free_gebase:
	kfree(gebase);

out_free_cpu:
	kfree(vcpu);

out:
	return ERR_PTR(err);
}

void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
{
	hrtimer_cancel(&vcpu->arch.comparecount_timer);

	kvm_vcpu_uninit(vcpu);

	kvm_mips_dump_stats(vcpu);

	kfree(vcpu->arch.guest_ebase);
	kfree(vcpu->arch.kseg0_commpage);
	kfree(vcpu);
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	kvm_arch_vcpu_free(vcpu);
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	return -ENOIOCTLCMD;
}

int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	int r = 0;
	sigset_t sigsaved;

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

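	/*
	 * MMIO completion: if the previous KVM_RUN exited with KVM_EXIT_MMIO
	 * for a load, userspace has since filled run->mmio.data with the
	 * value read from the device; propagate it into the guest register
	 * before re-entering the guest.
	 */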
	if (vcpu->mmio_needed) {
		if (!vcpu->mmio_is_write)
			kvm_mips_complete_mmio_load(vcpu, run);
		vcpu->mmio_needed = 0;
	}

	local_irq_disable();
	/* Check if we have any exceptions/interrupts pending */
	kvm_mips_deliver_interrupts(vcpu,
				    kvm_read_c0_guest_cause(vcpu->arch.cop0));

	kvm_guest_enter();

	r = __kvm_mips_vcpu_run(run, vcpu);

	kvm_guest_exit();
	local_irq_enable();

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	return r;
}

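/*
 * Interrupt line convention served by the KVM_INTERRUPT ioctl on MIPS, as
 * implemented below: a positive irq->irq of 2, 3 or 4 queues the
 * corresponding I/O interrupt for the addressed vcpu, while -2, -3 or -4
 * dequeues it again; irq->cpu == -1 targets the calling vcpu itself.
 */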
int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
			     struct kvm_mips_interrupt *irq)
{
	int intr = (int)irq->irq;
	struct kvm_vcpu *dvcpu = NULL;

	if (intr == 3 || intr == -3 || intr == 4 || intr == -4)
		kvm_debug("%s: CPU: %d, INTR: %d\n", __func__, irq->cpu,
			  (int)intr);

	if (irq->cpu == -1)
		dvcpu = vcpu;
	else
		dvcpu = vcpu->kvm->vcpus[irq->cpu];

	if (intr == 2 || intr == 3 || intr == 4) {
		kvm_mips_callbacks->queue_io_int(dvcpu, irq);

	} else if (intr == -2 || intr == -3 || intr == -4) {
		kvm_mips_callbacks->dequeue_io_int(dvcpu, irq);
	} else {
		kvm_err("%s: invalid interrupt ioctl (%d:%d)\n", __func__,
			irq->cpu, irq->irq);
		return -EINVAL;
	}

	dvcpu->arch.wait = 0;

	if (waitqueue_active(&dvcpu->wq))
		wake_up_interruptible(&dvcpu->wq);

	return 0;
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -ENOIOCTLCMD;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -ENOIOCTLCMD;
}

static u64 kvm_mips_get_one_regs[] = {
	KVM_REG_MIPS_R0,
	KVM_REG_MIPS_R1,
	KVM_REG_MIPS_R2,
	KVM_REG_MIPS_R3,
	KVM_REG_MIPS_R4,
	KVM_REG_MIPS_R5,
	KVM_REG_MIPS_R6,
	KVM_REG_MIPS_R7,
	KVM_REG_MIPS_R8,
	KVM_REG_MIPS_R9,
	KVM_REG_MIPS_R10,
	KVM_REG_MIPS_R11,
	KVM_REG_MIPS_R12,
	KVM_REG_MIPS_R13,
	KVM_REG_MIPS_R14,
	KVM_REG_MIPS_R15,
	KVM_REG_MIPS_R16,
	KVM_REG_MIPS_R17,
	KVM_REG_MIPS_R18,
	KVM_REG_MIPS_R19,
	KVM_REG_MIPS_R20,
	KVM_REG_MIPS_R21,
	KVM_REG_MIPS_R22,
	KVM_REG_MIPS_R23,
	KVM_REG_MIPS_R24,
	KVM_REG_MIPS_R25,
	KVM_REG_MIPS_R26,
	KVM_REG_MIPS_R27,
	KVM_REG_MIPS_R28,
	KVM_REG_MIPS_R29,
	KVM_REG_MIPS_R30,
	KVM_REG_MIPS_R31,

	KVM_REG_MIPS_HI,
	KVM_REG_MIPS_LO,
	KVM_REG_MIPS_PC,

	KVM_REG_MIPS_CP0_INDEX,
	KVM_REG_MIPS_CP0_CONTEXT,
	KVM_REG_MIPS_CP0_USERLOCAL,
	KVM_REG_MIPS_CP0_PAGEMASK,
	KVM_REG_MIPS_CP0_WIRED,
	KVM_REG_MIPS_CP0_HWRENA,
	KVM_REG_MIPS_CP0_BADVADDR,
	KVM_REG_MIPS_CP0_COUNT,
	KVM_REG_MIPS_CP0_ENTRYHI,
	KVM_REG_MIPS_CP0_COMPARE,
	KVM_REG_MIPS_CP0_STATUS,
	KVM_REG_MIPS_CP0_CAUSE,
	KVM_REG_MIPS_CP0_EPC,
	KVM_REG_MIPS_CP0_CONFIG,
	KVM_REG_MIPS_CP0_CONFIG1,
	KVM_REG_MIPS_CP0_CONFIG2,
	KVM_REG_MIPS_CP0_CONFIG3,
	KVM_REG_MIPS_CP0_CONFIG7,
	KVM_REG_MIPS_CP0_ERROREPC,

	KVM_REG_MIPS_COUNT_CTL,
	KVM_REG_MIPS_COUNT_RESUME,
	KVM_REG_MIPS_COUNT_HZ,
};

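/*
 * A minimal userspace sketch of the ONE_REG interface served below (the
 * vcpu_fd variable is hypothetical; everything else is the standard KVM
 * ioctl ABI):
 *
 *	__u64 pc;
 *	struct kvm_one_reg reg = {
 *		.id   = KVM_REG_MIPS_PC,
 *		.addr = (__u64)(unsigned long)&pc,
 *	};
 *	if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg) == 0)
 *		printf("guest PC = 0x%llx\n", pc);
 */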
static int kvm_mips_get_reg(struct kvm_vcpu *vcpu,
			    const struct kvm_one_reg *reg)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	int ret;
	s64 v;

	switch (reg->id) {
	case KVM_REG_MIPS_R0 ... KVM_REG_MIPS_R31:
		v = (long)vcpu->arch.gprs[reg->id - KVM_REG_MIPS_R0];
		break;
	case KVM_REG_MIPS_HI:
		v = (long)vcpu->arch.hi;
		break;
	case KVM_REG_MIPS_LO:
		v = (long)vcpu->arch.lo;
		break;
	case KVM_REG_MIPS_PC:
		v = (long)vcpu->arch.pc;
		break;

	case KVM_REG_MIPS_CP0_INDEX:
		v = (long)kvm_read_c0_guest_index(cop0);
		break;
	case KVM_REG_MIPS_CP0_CONTEXT:
		v = (long)kvm_read_c0_guest_context(cop0);
		break;
	case KVM_REG_MIPS_CP0_USERLOCAL:
		v = (long)kvm_read_c0_guest_userlocal(cop0);
		break;
	case KVM_REG_MIPS_CP0_PAGEMASK:
		v = (long)kvm_read_c0_guest_pagemask(cop0);
		break;
	case KVM_REG_MIPS_CP0_WIRED:
		v = (long)kvm_read_c0_guest_wired(cop0);
		break;
	case KVM_REG_MIPS_CP0_HWRENA:
		v = (long)kvm_read_c0_guest_hwrena(cop0);
		break;
	case KVM_REG_MIPS_CP0_BADVADDR:
		v = (long)kvm_read_c0_guest_badvaddr(cop0);
		break;
	case KVM_REG_MIPS_CP0_ENTRYHI:
		v = (long)kvm_read_c0_guest_entryhi(cop0);
		break;
	case KVM_REG_MIPS_CP0_COMPARE:
		v = (long)kvm_read_c0_guest_compare(cop0);
		break;
	case KVM_REG_MIPS_CP0_STATUS:
		v = (long)kvm_read_c0_guest_status(cop0);
		break;
	case KVM_REG_MIPS_CP0_CAUSE:
		v = (long)kvm_read_c0_guest_cause(cop0);
		break;
	case KVM_REG_MIPS_CP0_EPC:
		v = (long)kvm_read_c0_guest_epc(cop0);
		break;
	case KVM_REG_MIPS_CP0_ERROREPC:
		v = (long)kvm_read_c0_guest_errorepc(cop0);
		break;
	case KVM_REG_MIPS_CP0_CONFIG:
		v = (long)kvm_read_c0_guest_config(cop0);
		break;
	case KVM_REG_MIPS_CP0_CONFIG1:
		v = (long)kvm_read_c0_guest_config1(cop0);
		break;
	case KVM_REG_MIPS_CP0_CONFIG2:
		v = (long)kvm_read_c0_guest_config2(cop0);
		break;
	case KVM_REG_MIPS_CP0_CONFIG3:
		v = (long)kvm_read_c0_guest_config3(cop0);
		break;
	case KVM_REG_MIPS_CP0_CONFIG7:
		v = (long)kvm_read_c0_guest_config7(cop0);
		break;
	/* registers to be handled specially */
	case KVM_REG_MIPS_CP0_COUNT:
	case KVM_REG_MIPS_COUNT_CTL:
	case KVM_REG_MIPS_COUNT_RESUME:
	case KVM_REG_MIPS_COUNT_HZ:
		ret = kvm_mips_callbacks->get_one_reg(vcpu, reg, &v);
		if (ret)
			return ret;
		break;
	default:
		return -EINVAL;
	}
	if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64) {
		u64 __user *uaddr64 = (u64 __user *)(long)reg->addr;

		return put_user(v, uaddr64);
	} else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U32) {
		u32 __user *uaddr32 = (u32 __user *)(long)reg->addr;
		u32 v32 = (u32)v;

		return put_user(v32, uaddr32);
	} else {
		return -EINVAL;
	}
}

static int kvm_mips_set_reg(struct kvm_vcpu *vcpu,
			    const struct kvm_one_reg *reg)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	u64 v;

	if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64) {
		u64 __user *uaddr64 = (u64 __user *)(long)reg->addr;

		if (get_user(v, uaddr64) != 0)
			return -EFAULT;
	} else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U32) {
		u32 __user *uaddr32 = (u32 __user *)(long)reg->addr;
		s32 v32;

		if (get_user(v32, uaddr32) != 0)
			return -EFAULT;
		v = (s64)v32;
	} else {
		return -EINVAL;
	}

	switch (reg->id) {
	case KVM_REG_MIPS_R0:
		/* Silently ignore requests to set $0 */
		break;
	case KVM_REG_MIPS_R1 ... KVM_REG_MIPS_R31:
		vcpu->arch.gprs[reg->id - KVM_REG_MIPS_R0] = v;
		break;
	case KVM_REG_MIPS_HI:
		vcpu->arch.hi = v;
		break;
	case KVM_REG_MIPS_LO:
		vcpu->arch.lo = v;
		break;
	case KVM_REG_MIPS_PC:
		vcpu->arch.pc = v;
		break;

	case KVM_REG_MIPS_CP0_INDEX:
		kvm_write_c0_guest_index(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_CONTEXT:
		kvm_write_c0_guest_context(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_USERLOCAL:
		kvm_write_c0_guest_userlocal(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_PAGEMASK:
		kvm_write_c0_guest_pagemask(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_WIRED:
		kvm_write_c0_guest_wired(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_HWRENA:
		kvm_write_c0_guest_hwrena(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_BADVADDR:
		kvm_write_c0_guest_badvaddr(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_ENTRYHI:
		kvm_write_c0_guest_entryhi(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_STATUS:
		kvm_write_c0_guest_status(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_EPC:
		kvm_write_c0_guest_epc(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_ERROREPC:
		kvm_write_c0_guest_errorepc(cop0, v);
		break;
	/* registers to be handled specially */
	case KVM_REG_MIPS_CP0_COUNT:
	case KVM_REG_MIPS_CP0_COMPARE:
	case KVM_REG_MIPS_CP0_CAUSE:
	case KVM_REG_MIPS_COUNT_CTL:
	case KVM_REG_MIPS_COUNT_RESUME:
	case KVM_REG_MIPS_COUNT_HZ:
		return kvm_mips_callbacks->set_one_reg(vcpu, reg, v);
	default:
		return -EINVAL;
	}
	return 0;
}

long kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl,
			 unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		struct kvm_one_reg reg;

		if (copy_from_user(&reg, argp, sizeof(reg)))
			return -EFAULT;
		if (ioctl == KVM_SET_ONE_REG)
			return kvm_mips_set_reg(vcpu, &reg);
		else
			return kvm_mips_get_reg(vcpu, &reg);
	}
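	/*
	 * KVM_GET_REG_LIST follows the usual two-call pattern: userspace may
	 * first call with reg_list.n == 0, learn the real count from the
	 * -E2BIG reply (n is written back before the check), size its
	 * buffer, and then call again.
	 */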
	case KVM_GET_REG_LIST: {
		struct kvm_reg_list __user *user_list = argp;
		u64 __user *reg_dest;
		struct kvm_reg_list reg_list;
		unsigned n;

		if (copy_from_user(&reg_list, user_list, sizeof(reg_list)))
			return -EFAULT;
		n = reg_list.n;
		reg_list.n = ARRAY_SIZE(kvm_mips_get_one_regs);
		if (copy_to_user(user_list, &reg_list, sizeof(reg_list)))
			return -EFAULT;
		if (n < reg_list.n)
			return -E2BIG;
		reg_dest = user_list->reg;
		if (copy_to_user(reg_dest, kvm_mips_get_one_regs,
				 sizeof(kvm_mips_get_one_regs)))
			return -EFAULT;
		return 0;
	}
	case KVM_NMI:
		/* Treat the NMI as a CPU reset */
		r = kvm_mips_reset_vcpu(vcpu);
		break;
	case KVM_INTERRUPT:
		{
			struct kvm_mips_interrupt irq;

			r = -EFAULT;
			if (copy_from_user(&irq, argp, sizeof(irq)))
				goto out;

			kvm_debug("[%d] %s: irq: %d\n", vcpu->vcpu_id, __func__,
				  irq.irq);

			r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
			break;
		}
	default:
		r = -ENOIOCTLCMD;
	}

out:
	return r;
}

/* Get (and clear) the dirty memory log for a memory slot. */
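/*
 * Note that this implementation simply zeroes the whole dirty bitmap once
 * anything is dirty; unlike some other architectures it does not
 * write-protect the pages again, so the log is only a coarse snapshot.
 */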
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
{
	struct kvm_memory_slot *memslot;
	unsigned long ga, ga_end;
	int is_dirty = 0;
	int r;
	unsigned long n;

	mutex_lock(&kvm->slots_lock);

	r = kvm_get_dirty_log(kvm, log, &is_dirty);
	if (r)
		goto out;

	/* If nothing is dirty, don't bother messing with page tables. */
	if (is_dirty) {
		memslot = &kvm->memslots->memslots[log->slot];

		ga = memslot->base_gfn << PAGE_SHIFT;
		ga_end = ga + (memslot->npages << PAGE_SHIFT);

		kvm_info("%s: dirty, ga: %#lx, ga_end %#lx\n", __func__, ga,
			 ga_end);

		n = kvm_dirty_bitmap_bytes(memslot);
		memset(memslot->dirty_bitmap, 0, n);
	}

	r = 0;
out:
	mutex_unlock(&kvm->slots_lock);
	return r;
}

long kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
{
	long r;

	switch (ioctl) {
	default:
		r = -ENOIOCTLCMD;
	}

	return r;
}

int kvm_arch_init(void *opaque)
{
	if (kvm_mips_callbacks) {
		kvm_err("kvm: module already exists\n");
		return -EEXIST;
	}

	return kvm_mips_emulation_init(&kvm_mips_callbacks);
}

void kvm_arch_exit(void)
{
	kvm_mips_callbacks = NULL;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	return -ENOIOCTLCMD;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	return -ENOIOCTLCMD;
}

int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
	return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -ENOIOCTLCMD;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -ENOIOCTLCMD;
}

int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}

int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_ONE_REG:
		r = 1;
		break;
	case KVM_CAP_COALESCED_MMIO:
		r = KVM_COALESCED_MMIO_PAGE_OFFSET;
		break;
	default:
		r = 0;
		break;
	}
	return r;
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return kvm_mips_pending_timer(vcpu);
}

int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu)
{
	int i;
	struct mips_coproc *cop0;

	if (!vcpu)
		return -1;

	kvm_debug("VCPU Register Dump:\n");
	kvm_debug("\tpc = 0x%08lx\n", vcpu->arch.pc);
	kvm_debug("\texceptions: %08lx\n", vcpu->arch.pending_exceptions);

	for (i = 0; i < 32; i += 4) {
		kvm_debug("\tgpr%02d: %08lx %08lx %08lx %08lx\n", i,
		       vcpu->arch.gprs[i],
		       vcpu->arch.gprs[i + 1],
		       vcpu->arch.gprs[i + 2], vcpu->arch.gprs[i + 3]);
	}
	kvm_debug("\thi: 0x%08lx\n", vcpu->arch.hi);
	kvm_debug("\tlo: 0x%08lx\n", vcpu->arch.lo);

	cop0 = vcpu->arch.cop0;
	kvm_debug("\tStatus: 0x%08lx, Cause: 0x%08lx\n",
		  kvm_read_c0_guest_status(cop0),
		  kvm_read_c0_guest_cause(cop0));

	kvm_debug("\tEPC: 0x%08lx\n", kvm_read_c0_guest_epc(cop0));

	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	for (i = 1; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
		vcpu->arch.gprs[i] = regs->gpr[i];
	vcpu->arch.gprs[0] = 0; /* zero is special, and cannot be set. */
	vcpu->arch.hi = regs->hi;
	vcpu->arch.lo = regs->lo;
	vcpu->arch.pc = regs->pc;

	return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
		regs->gpr[i] = vcpu->arch.gprs[i];

	regs->hi = vcpu->arch.hi;
	regs->lo = vcpu->arch.lo;
	regs->pc = vcpu->arch.pc;

	return 0;
}

static void kvm_mips_comparecount_func(unsigned long data)
{
	struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;

	kvm_mips_callbacks->queue_timer_int(vcpu);

	vcpu->arch.wait = 0;
	if (waitqueue_active(&vcpu->wq))
		wake_up_interruptible(&vcpu->wq);
}

/* low level hrtimer wake routine */
static enum hrtimer_restart kvm_mips_comparecount_wakeup(struct hrtimer *timer)
{
	struct kvm_vcpu *vcpu;

	vcpu = container_of(timer, struct kvm_vcpu, arch.comparecount_timer);
	kvm_mips_comparecount_func((unsigned long) vcpu);
	return kvm_mips_count_timeout(vcpu);
}

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	kvm_mips_callbacks->vcpu_init(vcpu);
	hrtimer_init(&vcpu->arch.comparecount_timer, CLOCK_MONOTONIC,
		     HRTIMER_MODE_REL);
	vcpu->arch.comparecount_timer.function = kvm_mips_comparecount_wakeup;
	return 0;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return 0;
}

/* Initial guest state */
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	return kvm_mips_callbacks->vcpu_setup(vcpu);
}

static void kvm_mips_set_c0_status(void)
{
	uint32_t status = read_c0_status();

	if (cpu_has_fpu)
		status |= (ST0_CU1);

	if (cpu_has_dsp)
		status |= (ST0_MX);

	write_c0_status(status);
	ehb();
}

/*
 * Return value is in the form (errcode<<2 | RESUME_FLAG_HOST | RESUME_FLAG_NV).
 * For example, (-EINTR << 2) | RESUME_HOST asks the caller to bail out to
 * userspace with -EINTR, while a plain RESUME_GUEST re-enters the guest.
 */
int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	uint32_t cause = vcpu->arch.host_cp0_cause;
	uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
	uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	/* Set a default exit reason */
	run->exit_reason = KVM_EXIT_UNKNOWN;
	run->ready_for_interrupt_injection = 1;

	/*
	 * Set the appropriate status bits based on host CPU features,
	 * before we hit the scheduler
	 */
	kvm_mips_set_c0_status();

	local_irq_enable();

	kvm_debug("kvm_mips_handle_exit: cause: %#x, PC: %p, kvm_run: %p, kvm_vcpu: %p\n",
			cause, opc, run, vcpu);

	/*
	 * Do a privilege check; if the guest was in user mode, most of these
	 * exit conditions end up causing an exception to be delivered to the
	 * guest kernel.
	 */
	er = kvm_mips_check_privilege(cause, opc, run, vcpu);
	if (er == EMULATE_PRIV_FAIL) {
		goto skip_emul;
	} else if (er == EMULATE_FAIL) {
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
		goto skip_emul;
	}

	switch (exccode) {
	case T_INT:
		kvm_debug("[%d]T_INT @ %p\n", vcpu->vcpu_id, opc);

		++vcpu->stat.int_exits;
		trace_kvm_exit(vcpu, INT_EXITS);

		if (need_resched())
			cond_resched();

		ret = RESUME_GUEST;
		break;

	case T_COP_UNUSABLE:
		kvm_debug("T_COP_UNUSABLE: @ PC: %p\n", opc);

		++vcpu->stat.cop_unusable_exits;
		trace_kvm_exit(vcpu, COP_UNUSABLE_EXITS);
		ret = kvm_mips_callbacks->handle_cop_unusable(vcpu);
		/* XXXKYMA: Might need to return to user space */
		if (run->exit_reason == KVM_EXIT_IRQ_WINDOW_OPEN)
			ret = RESUME_HOST;
		break;

	case T_TLB_MOD:
		++vcpu->stat.tlbmod_exits;
		trace_kvm_exit(vcpu, TLBMOD_EXITS);
		ret = kvm_mips_callbacks->handle_tlb_mod(vcpu);
		break;

	case T_TLB_ST_MISS:
		kvm_debug("TLB ST fault:  cause %#x, status %#lx, PC: %p, BadVaddr: %#lx\n",
			  cause, kvm_read_c0_guest_status(vcpu->arch.cop0), opc,
			  badvaddr);

		++vcpu->stat.tlbmiss_st_exits;
		trace_kvm_exit(vcpu, TLBMISS_ST_EXITS);
		ret = kvm_mips_callbacks->handle_tlb_st_miss(vcpu);
		break;

	case T_TLB_LD_MISS:
		kvm_debug("TLB LD fault: cause %#x, PC: %p, BadVaddr: %#lx\n",
			  cause, opc, badvaddr);

		++vcpu->stat.tlbmiss_ld_exits;
		trace_kvm_exit(vcpu, TLBMISS_LD_EXITS);
		ret = kvm_mips_callbacks->handle_tlb_ld_miss(vcpu);
		break;

	case T_ADDR_ERR_ST:
		++vcpu->stat.addrerr_st_exits;
		trace_kvm_exit(vcpu, ADDRERR_ST_EXITS);
		ret = kvm_mips_callbacks->handle_addr_err_st(vcpu);
		break;

	case T_ADDR_ERR_LD:
		++vcpu->stat.addrerr_ld_exits;
		trace_kvm_exit(vcpu, ADDRERR_LD_EXITS);
		ret = kvm_mips_callbacks->handle_addr_err_ld(vcpu);
		break;

	case T_SYSCALL:
		++vcpu->stat.syscall_exits;
		trace_kvm_exit(vcpu, SYSCALL_EXITS);
		ret = kvm_mips_callbacks->handle_syscall(vcpu);
		break;

	case T_RES_INST:
		++vcpu->stat.resvd_inst_exits;
		trace_kvm_exit(vcpu, RESVD_INST_EXITS);
		ret = kvm_mips_callbacks->handle_res_inst(vcpu);
		break;

	case T_BREAK:
		++vcpu->stat.break_inst_exits;
		trace_kvm_exit(vcpu, BREAK_INST_EXITS);
		ret = kvm_mips_callbacks->handle_break(vcpu);
		break;

	default:
		kvm_err("Exception Code: %d, not yet handled, @ PC: %p, inst: 0x%08x  BadVaddr: %#lx Status: %#lx\n",
			exccode, opc, kvm_get_inst(opc, vcpu), badvaddr,
			kvm_read_c0_guest_status(vcpu->arch.cop0));
		kvm_arch_vcpu_dump_regs(vcpu);
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
		break;
	}

skip_emul:
	local_irq_disable();

	if (er == EMULATE_DONE && !(ret & RESUME_HOST))
		kvm_mips_deliver_interrupts(vcpu, cause);

	if (!(ret & RESUME_HOST)) {
		/* Only check for signals if not already exiting to userspace */
		if (signal_pending(current)) {
			run->exit_reason = KVM_EXIT_INTR;
			ret = (-EINTR << 2) | RESUME_HOST;
			++vcpu->stat.signal_exits;
			trace_kvm_exit(vcpu, SIGNAL_EXITS);
		}
	}

	return ret;
}

int __init kvm_mips_init(void)
{
	int ret;

	ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);

	if (ret)
		return ret;

	/*
	 * On MIPS, kernel modules are executed from "mapped space", which
	 * requires TLBs. The TLB handling code is statically linked with
	 * the rest of the kernel (tlb.c) to avoid the possibility of
	 * double faulting. The issue is that the TLB code references
	 * routines that are part of the KVM module, which are only
	 * available once the module is loaded.
	 */
	kvm_mips_gfn_to_pfn = gfn_to_pfn;
	kvm_mips_release_pfn_clean = kvm_release_pfn_clean;
	kvm_mips_is_error_pfn = is_error_pfn;

	pr_info("KVM/MIPS Initialized\n");
	return 0;
}

void __exit kvm_mips_exit(void)
{
	kvm_exit();

	kvm_mips_gfn_to_pfn = NULL;
	kvm_mips_release_pfn_clean = NULL;
	kvm_mips_is_error_pfn = NULL;

	pr_info("KVM/MIPS unloaded\n");
}

module_init(kvm_mips_init);
module_exit(kvm_mips_exit);

EXPORT_TRACEPOINT_SYMBOL(kvm_exit);