xref: /openbmc/linux/arch/mips/kvm/mips.c (revision e0bf6c5c)
1 /*
2  * This file is subject to the terms and conditions of the GNU General Public
3  * License.  See the file "COPYING" in the main directory of this archive
4  * for more details.
5  *
6  * KVM/MIPS: MIPS specific KVM APIs
7  *
8  * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
9  * Authors: Sanjay Lal <sanjayl@kymasys.com>
10  */
11 
12 #include <linux/errno.h>
13 #include <linux/err.h>
14 #include <linux/module.h>
15 #include <linux/vmalloc.h>
16 #include <linux/fs.h>
17 #include <linux/bootmem.h>
18 #include <asm/fpu.h>
19 #include <asm/page.h>
20 #include <asm/cacheflush.h>
21 #include <asm/mmu_context.h>
22 #include <asm/pgtable.h>
23 
24 #include <linux/kvm_host.h>
25 
26 #include "interrupt.h"
27 #include "commpage.h"
28 
29 #define CREATE_TRACE_POINTS
30 #include "trace.h"
31 
32 #ifndef VECTORSPACING
33 #define VECTORSPACING 0x100	/* for EI/VI mode */
34 #endif
35 
36 #define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x)
37 struct kvm_stats_debugfs_item debugfs_entries[] = {
38 	{ "wait",	  VCPU_STAT(wait_exits),	 KVM_STAT_VCPU },
39 	{ "cache",	  VCPU_STAT(cache_exits),	 KVM_STAT_VCPU },
40 	{ "signal",	  VCPU_STAT(signal_exits),	 KVM_STAT_VCPU },
41 	{ "interrupt",	  VCPU_STAT(int_exits),		 KVM_STAT_VCPU },
42 	{ "cop_unusable", VCPU_STAT(cop_unusable_exits), KVM_STAT_VCPU },
43 	{ "tlbmod",	  VCPU_STAT(tlbmod_exits),	 KVM_STAT_VCPU },
44 	{ "tlbmiss_ld",	  VCPU_STAT(tlbmiss_ld_exits),	 KVM_STAT_VCPU },
45 	{ "tlbmiss_st",	  VCPU_STAT(tlbmiss_st_exits),	 KVM_STAT_VCPU },
46 	{ "addrerr_st",	  VCPU_STAT(addrerr_st_exits),	 KVM_STAT_VCPU },
47 	{ "addrerr_ld",	  VCPU_STAT(addrerr_ld_exits),	 KVM_STAT_VCPU },
48 	{ "syscall",	  VCPU_STAT(syscall_exits),	 KVM_STAT_VCPU },
49 	{ "resvd_inst",	  VCPU_STAT(resvd_inst_exits),	 KVM_STAT_VCPU },
50 	{ "break_inst",	  VCPU_STAT(break_inst_exits),	 KVM_STAT_VCPU },
51 	{ "flush_dcache", VCPU_STAT(flush_dcache_exits), KVM_STAT_VCPU },
52 	{ "halt_successful_poll", VCPU_STAT(halt_successful_poll), KVM_STAT_VCPU },
53 	{ "halt_wakeup",  VCPU_STAT(halt_wakeup),	 KVM_STAT_VCPU },
54 	{NULL}
55 };
56 
57 static int kvm_mips_reset_vcpu(struct kvm_vcpu *vcpu)
58 {
59 	int i;
60 
61 	for_each_possible_cpu(i) {
62 		vcpu->arch.guest_kernel_asid[i] = 0;
63 		vcpu->arch.guest_user_asid[i] = 0;
64 	}
65 
66 	return 0;
67 }
68 
69 /*
70  * XXXKYMA: We are simulating a processor that has the WII bit set in
71  * Config7, so we are "runnable" if interrupts are pending
72  */
73 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
74 {
75 	return !!(vcpu->arch.pending_exceptions);
76 }
77 
78 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
79 {
80 	return 1;
81 }
82 
83 int kvm_arch_hardware_enable(void)
84 {
85 	return 0;
86 }
87 
88 int kvm_arch_hardware_setup(void)
89 {
90 	return 0;
91 }
92 
93 void kvm_arch_check_processor_compat(void *rtn)
94 {
95 	*(int *)rtn = 0;
96 }
97 
98 static void kvm_mips_init_tlbs(struct kvm *kvm)
99 {
100 	unsigned long wired;
101 
102 	/*
103 	 * Add a wired entry to the TLB; it is used to map the commpage to
104 	 * the Guest kernel
105 	 */
106 	wired = read_c0_wired();
107 	write_c0_wired(wired + 1);
108 	mtc0_tlbw_hazard();
109 	kvm->arch.commpage_tlb = wired;
110 
111 	kvm_debug("[%d] commpage TLB: %d\n", smp_processor_id(),
112 		  kvm->arch.commpage_tlb);
113 }
114 
115 static void kvm_mips_init_vm_percpu(void *arg)
116 {
117 	struct kvm *kvm = (struct kvm *)arg;
118 
119 	kvm_mips_init_tlbs(kvm);
120 	kvm_mips_callbacks->vm_init(kvm);
121 
122 }
123 
124 int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
125 {
126 	if (atomic_inc_return(&kvm_mips_instance) == 1) {
127 		kvm_debug("%s: 1st KVM instance, setup host TLB parameters\n",
128 			  __func__);
129 		on_each_cpu(kvm_mips_init_vm_percpu, kvm, 1);
130 	}
131 
132 	return 0;
133 }
134 
135 void kvm_mips_free_vcpus(struct kvm *kvm)
136 {
137 	unsigned int i;
138 	struct kvm_vcpu *vcpu;
139 
140 	/* Put the pages we reserved for the guest pmap */
141 	for (i = 0; i < kvm->arch.guest_pmap_npages; i++) {
142 		if (kvm->arch.guest_pmap[i] != KVM_INVALID_PAGE)
143 			kvm_mips_release_pfn_clean(kvm->arch.guest_pmap[i]);
144 	}
145 	kfree(kvm->arch.guest_pmap);
146 
147 	kvm_for_each_vcpu(i, vcpu, kvm) {
148 		kvm_arch_vcpu_free(vcpu);
149 	}
150 
151 	mutex_lock(&kvm->lock);
152 
153 	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
154 		kvm->vcpus[i] = NULL;
155 
156 	atomic_set(&kvm->online_vcpus, 0);
157 
158 	mutex_unlock(&kvm->lock);
159 }
160 
161 static void kvm_mips_uninit_tlbs(void *arg)
162 {
163 	/* Restore wired count */
164 	write_c0_wired(0);
165 	mtc0_tlbw_hazard();
166 	/* Clear out all the TLBs */
167 	kvm_local_flush_tlb_all();
168 }
169 
170 void kvm_arch_destroy_vm(struct kvm *kvm)
171 {
172 	kvm_mips_free_vcpus(kvm);
173 
174 	/* If this is the last instance, restore wired count */
175 	if (atomic_dec_return(&kvm_mips_instance) == 0) {
176 		kvm_debug("%s: last KVM instance, restoring TLB parameters\n",
177 			  __func__);
178 		on_each_cpu(kvm_mips_uninit_tlbs, NULL, 1);
179 	}
180 }
181 
182 long kvm_arch_dev_ioctl(struct file *filp, unsigned int ioctl,
183 			unsigned long arg)
184 {
185 	return -ENOIOCTLCMD;
186 }
187 
188 int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
189 			    unsigned long npages)
190 {
191 	return 0;
192 }
193 
194 int kvm_arch_prepare_memory_region(struct kvm *kvm,
195 				   struct kvm_memory_slot *memslot,
196 				   struct kvm_userspace_memory_region *mem,
197 				   enum kvm_mr_change change)
198 {
199 	return 0;
200 }
201 
202 void kvm_arch_commit_memory_region(struct kvm *kvm,
203 				   struct kvm_userspace_memory_region *mem,
204 				   const struct kvm_memory_slot *old,
205 				   enum kvm_mr_change change)
206 {
207 	unsigned long npages = 0;
208 	int i;
209 
210 	kvm_debug("%s: kvm: %p slot: %d, GPA: %llx, size: %llx, QVA: %llx\n",
211 		  __func__, kvm, mem->slot, mem->guest_phys_addr,
212 		  mem->memory_size, mem->userspace_addr);
213 
214 	/* Setup Guest PMAP table */
215 	if (!kvm->arch.guest_pmap) {
216 		if (mem->slot == 0)
217 			npages = mem->memory_size >> PAGE_SHIFT;
218 
219 		if (npages) {
220 			kvm->arch.guest_pmap_npages = npages;
221 			kvm->arch.guest_pmap =
222 			    kzalloc(npages * sizeof(unsigned long), GFP_KERNEL);
223 
224 			if (!kvm->arch.guest_pmap) {
225 				kvm_err("Failed to allocate guest PMAP\n");
226 				return;
227 			}
228 
229 			kvm_debug("Allocated space for Guest PMAP Table (%ld pages) @ %p\n",
230 				  npages, kvm->arch.guest_pmap);
231 
232 			/* Now setup the page table */
233 			for (i = 0; i < npages; i++)
234 				kvm->arch.guest_pmap[i] = KVM_INVALID_PAGE;
235 		}
236 	}
237 }
238 
239 struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
240 {
241 	int err, size, offset;
242 	void *gebase;
243 	int i;
244 
245 	struct kvm_vcpu *vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);
246 
247 	if (!vcpu) {
248 		err = -ENOMEM;
249 		goto out;
250 	}
251 
252 	err = kvm_vcpu_init(vcpu, kvm, id);
253 
254 	if (err)
255 		goto out_free_cpu;
256 
257 	kvm_debug("kvm @ %p: create cpu %d at %p\n", kvm, id, vcpu);
258 
259 	/*
260 	 * Allocate space for host mode exception handlers that handle
261 	 * guest mode exits
262 	 */
263 	if (cpu_has_veic || cpu_has_vint)
264 		size = 0x200 + VECTORSPACING * 64;
265 	else
266 		size = 0x4000;
267 
268 	/* Save Linux EBASE */
269 	vcpu->arch.host_ebase = (void *)read_c0_ebase();
270 
271 	gebase = kzalloc(ALIGN(size, PAGE_SIZE), GFP_KERNEL);
272 
273 	if (!gebase) {
274 		err = -ENOMEM;
275 		goto out_free_cpu;
276 	}
277 	kvm_debug("Allocated %d bytes for KVM Exception Handlers @ %p\n",
278 		  ALIGN(size, PAGE_SIZE), gebase);
279 
280 	/* Save new ebase */
281 	vcpu->arch.guest_ebase = gebase;
282 
283 	/* Copy L1 Guest Exception handler to correct offset */
284 
285 	/* TLB Refill, EXL = 0 */
286 	memcpy(gebase, mips32_exception,
287 	       mips32_exceptionEnd - mips32_exception);
288 
289 	/* General Exception Entry point */
290 	memcpy(gebase + 0x180, mips32_exception,
291 	       mips32_exceptionEnd - mips32_exception);
292 
293 	/* For vectored interrupts poke the exception code @ all offsets 0-7 */
294 	for (i = 0; i < 8; i++) {
295 		kvm_debug("L1 Vectored handler @ %p\n",
296 			  gebase + 0x200 + (i * VECTORSPACING));
297 		memcpy(gebase + 0x200 + (i * VECTORSPACING), mips32_exception,
298 		       mips32_exceptionEnd - mips32_exception);
299 	}
300 
301 	/* General handler, relocate to unmapped space for sanity's sake */
302 	offset = 0x2000;
303 	kvm_debug("Installing KVM Exception handlers @ %p, %#x bytes\n",
304 		  gebase + offset,
305 		  mips32_GuestExceptionEnd - mips32_GuestException);
306 
307 	memcpy(gebase + offset, mips32_GuestException,
308 	       mips32_GuestExceptionEnd - mips32_GuestException);
309 
310 	/* Invalidate the icache for these ranges */
311 	local_flush_icache_range((unsigned long)gebase,
312 				(unsigned long)gebase + ALIGN(size, PAGE_SIZE));
313 
314 	/*
315 	 * Allocate comm page for guest kernel; a wired TLB entry will be
316 	 * reserved for mapping GVA @ 0xFFFF8000 to this page
317 	 */
318 	vcpu->arch.kseg0_commpage = kzalloc(PAGE_SIZE << 1, GFP_KERNEL);
319 
320 	if (!vcpu->arch.kseg0_commpage) {
321 		err = -ENOMEM;
322 		goto out_free_gebase;
323 	}
324 
325 	kvm_debug("Allocated COMM page @ %p\n", vcpu->arch.kseg0_commpage);
326 	kvm_mips_commpage_init(vcpu);
327 
328 	/* Init */
329 	vcpu->arch.last_sched_cpu = -1;
330 
331 	/* Start off the timer */
332 	kvm_mips_init_count(vcpu);
333 
334 	return vcpu;
335 
336 out_free_gebase:
337 	kfree(gebase);
338 
339 out_free_cpu:
340 	kfree(vcpu);
341 
342 out:
343 	return ERR_PTR(err);
344 }
345 
346 void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
347 {
348 	hrtimer_cancel(&vcpu->arch.comparecount_timer);
349 
350 	kvm_vcpu_uninit(vcpu);
351 
352 	kvm_mips_dump_stats(vcpu);
353 
354 	kfree(vcpu->arch.guest_ebase);
355 	kfree(vcpu->arch.kseg0_commpage);
356 	kfree(vcpu);
357 }
358 
359 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
360 {
361 	kvm_arch_vcpu_free(vcpu);
362 }
363 
364 int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
365 					struct kvm_guest_debug *dbg)
366 {
367 	return -ENOIOCTLCMD;
368 }
369 
370 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
371 {
372 	int r = 0;
373 	sigset_t sigsaved;
374 
375 	if (vcpu->sigset_active)
376 		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
377 
378 	if (vcpu->mmio_needed) {
379 		if (!vcpu->mmio_is_write)
380 			kvm_mips_complete_mmio_load(vcpu, run);
381 		vcpu->mmio_needed = 0;
382 	}
383 
384 	lose_fpu(1);
385 
386 	local_irq_disable();
387 	/* Check if we have any exceptions/interrupts pending */
388 	kvm_mips_deliver_interrupts(vcpu,
389 				    kvm_read_c0_guest_cause(vcpu->arch.cop0));
390 
391 	kvm_guest_enter();
392 
393 	/* Disable hardware page table walking while in guest */
394 	htw_stop();
395 
396 	r = __kvm_mips_vcpu_run(run, vcpu);
397 
398 	/* Re-enable HTW before enabling interrupts */
399 	htw_start();
400 
401 	kvm_guest_exit();
402 	local_irq_enable();
403 
404 	if (vcpu->sigset_active)
405 		sigprocmask(SIG_SETMASK, &sigsaved, NULL);
406 
407 	return r;
408 }
409 
410 int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
411 			     struct kvm_mips_interrupt *irq)
412 {
413 	int intr = (int)irq->irq;
414 	struct kvm_vcpu *dvcpu = NULL;
415 
416 	if (intr == 3 || intr == -3 || intr == 4 || intr == -4)
417 		kvm_debug("%s: CPU: %d, INTR: %d\n", __func__, irq->cpu,
418 			  (int)intr);
419 
420 	if (irq->cpu == -1)
421 		dvcpu = vcpu;
422 	else
423 		dvcpu = vcpu->kvm->vcpus[irq->cpu];
424 
425 	if (intr == 2 || intr == 3 || intr == 4) {
426 		kvm_mips_callbacks->queue_io_int(dvcpu, irq);
427 
428 	} else if (intr == -2 || intr == -3 || intr == -4) {
429 		kvm_mips_callbacks->dequeue_io_int(dvcpu, irq);
430 	} else {
431 		kvm_err("%s: invalid interrupt ioctl (%d:%d)\n", __func__,
432 			irq->cpu, irq->irq);
433 		return -EINVAL;
434 	}
435 
436 	dvcpu->arch.wait = 0;
437 
438 	if (waitqueue_active(&dvcpu->wq))
439 		wake_up_interruptible(&dvcpu->wq);
440 
441 	return 0;
442 }
443 
444 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
445 				    struct kvm_mp_state *mp_state)
446 {
447 	return -ENOIOCTLCMD;
448 }
449 
450 int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
451 				    struct kvm_mp_state *mp_state)
452 {
453 	return -ENOIOCTLCMD;
454 }
455 
456 static u64 kvm_mips_get_one_regs[] = {
457 	KVM_REG_MIPS_R0,
458 	KVM_REG_MIPS_R1,
459 	KVM_REG_MIPS_R2,
460 	KVM_REG_MIPS_R3,
461 	KVM_REG_MIPS_R4,
462 	KVM_REG_MIPS_R5,
463 	KVM_REG_MIPS_R6,
464 	KVM_REG_MIPS_R7,
465 	KVM_REG_MIPS_R8,
466 	KVM_REG_MIPS_R9,
467 	KVM_REG_MIPS_R10,
468 	KVM_REG_MIPS_R11,
469 	KVM_REG_MIPS_R12,
470 	KVM_REG_MIPS_R13,
471 	KVM_REG_MIPS_R14,
472 	KVM_REG_MIPS_R15,
473 	KVM_REG_MIPS_R16,
474 	KVM_REG_MIPS_R17,
475 	KVM_REG_MIPS_R18,
476 	KVM_REG_MIPS_R19,
477 	KVM_REG_MIPS_R20,
478 	KVM_REG_MIPS_R21,
479 	KVM_REG_MIPS_R22,
480 	KVM_REG_MIPS_R23,
481 	KVM_REG_MIPS_R24,
482 	KVM_REG_MIPS_R25,
483 	KVM_REG_MIPS_R26,
484 	KVM_REG_MIPS_R27,
485 	KVM_REG_MIPS_R28,
486 	KVM_REG_MIPS_R29,
487 	KVM_REG_MIPS_R30,
488 	KVM_REG_MIPS_R31,
489 
490 	KVM_REG_MIPS_HI,
491 	KVM_REG_MIPS_LO,
492 	KVM_REG_MIPS_PC,
493 
494 	KVM_REG_MIPS_CP0_INDEX,
495 	KVM_REG_MIPS_CP0_CONTEXT,
496 	KVM_REG_MIPS_CP0_USERLOCAL,
497 	KVM_REG_MIPS_CP0_PAGEMASK,
498 	KVM_REG_MIPS_CP0_WIRED,
499 	KVM_REG_MIPS_CP0_HWRENA,
500 	KVM_REG_MIPS_CP0_BADVADDR,
501 	KVM_REG_MIPS_CP0_COUNT,
502 	KVM_REG_MIPS_CP0_ENTRYHI,
503 	KVM_REG_MIPS_CP0_COMPARE,
504 	KVM_REG_MIPS_CP0_STATUS,
505 	KVM_REG_MIPS_CP0_CAUSE,
506 	KVM_REG_MIPS_CP0_EPC,
507 	KVM_REG_MIPS_CP0_CONFIG,
508 	KVM_REG_MIPS_CP0_CONFIG1,
509 	KVM_REG_MIPS_CP0_CONFIG2,
510 	KVM_REG_MIPS_CP0_CONFIG3,
511 	KVM_REG_MIPS_CP0_CONFIG7,
512 	KVM_REG_MIPS_CP0_ERROREPC,
513 
514 	KVM_REG_MIPS_COUNT_CTL,
515 	KVM_REG_MIPS_COUNT_RESUME,
516 	KVM_REG_MIPS_COUNT_HZ,
517 };
518 
519 static int kvm_mips_get_reg(struct kvm_vcpu *vcpu,
520 			    const struct kvm_one_reg *reg)
521 {
522 	struct mips_coproc *cop0 = vcpu->arch.cop0;
523 	int ret;
524 	s64 v;
525 
526 	switch (reg->id) {
527 	case KVM_REG_MIPS_R0 ... KVM_REG_MIPS_R31:
528 		v = (long)vcpu->arch.gprs[reg->id - KVM_REG_MIPS_R0];
529 		break;
530 	case KVM_REG_MIPS_HI:
531 		v = (long)vcpu->arch.hi;
532 		break;
533 	case KVM_REG_MIPS_LO:
534 		v = (long)vcpu->arch.lo;
535 		break;
536 	case KVM_REG_MIPS_PC:
537 		v = (long)vcpu->arch.pc;
538 		break;
539 
540 	case KVM_REG_MIPS_CP0_INDEX:
541 		v = (long)kvm_read_c0_guest_index(cop0);
542 		break;
543 	case KVM_REG_MIPS_CP0_CONTEXT:
544 		v = (long)kvm_read_c0_guest_context(cop0);
545 		break;
546 	case KVM_REG_MIPS_CP0_USERLOCAL:
547 		v = (long)kvm_read_c0_guest_userlocal(cop0);
548 		break;
549 	case KVM_REG_MIPS_CP0_PAGEMASK:
550 		v = (long)kvm_read_c0_guest_pagemask(cop0);
551 		break;
552 	case KVM_REG_MIPS_CP0_WIRED:
553 		v = (long)kvm_read_c0_guest_wired(cop0);
554 		break;
555 	case KVM_REG_MIPS_CP0_HWRENA:
556 		v = (long)kvm_read_c0_guest_hwrena(cop0);
557 		break;
558 	case KVM_REG_MIPS_CP0_BADVADDR:
559 		v = (long)kvm_read_c0_guest_badvaddr(cop0);
560 		break;
561 	case KVM_REG_MIPS_CP0_ENTRYHI:
562 		v = (long)kvm_read_c0_guest_entryhi(cop0);
563 		break;
564 	case KVM_REG_MIPS_CP0_COMPARE:
565 		v = (long)kvm_read_c0_guest_compare(cop0);
566 		break;
567 	case KVM_REG_MIPS_CP0_STATUS:
568 		v = (long)kvm_read_c0_guest_status(cop0);
569 		break;
570 	case KVM_REG_MIPS_CP0_CAUSE:
571 		v = (long)kvm_read_c0_guest_cause(cop0);
572 		break;
573 	case KVM_REG_MIPS_CP0_EPC:
574 		v = (long)kvm_read_c0_guest_epc(cop0);
575 		break;
576 	case KVM_REG_MIPS_CP0_ERROREPC:
577 		v = (long)kvm_read_c0_guest_errorepc(cop0);
578 		break;
579 	case KVM_REG_MIPS_CP0_CONFIG:
580 		v = (long)kvm_read_c0_guest_config(cop0);
581 		break;
582 	case KVM_REG_MIPS_CP0_CONFIG1:
583 		v = (long)kvm_read_c0_guest_config1(cop0);
584 		break;
585 	case KVM_REG_MIPS_CP0_CONFIG2:
586 		v = (long)kvm_read_c0_guest_config2(cop0);
587 		break;
588 	case KVM_REG_MIPS_CP0_CONFIG3:
589 		v = (long)kvm_read_c0_guest_config3(cop0);
590 		break;
591 	case KVM_REG_MIPS_CP0_CONFIG7:
592 		v = (long)kvm_read_c0_guest_config7(cop0);
593 		break;
594 	/* registers to be handled specially */
595 	case KVM_REG_MIPS_CP0_COUNT:
596 	case KVM_REG_MIPS_COUNT_CTL:
597 	case KVM_REG_MIPS_COUNT_RESUME:
598 	case KVM_REG_MIPS_COUNT_HZ:
599 		ret = kvm_mips_callbacks->get_one_reg(vcpu, reg, &v);
600 		if (ret)
601 			return ret;
602 		break;
603 	default:
604 		return -EINVAL;
605 	}
606 	if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64) {
607 		u64 __user *uaddr64 = (u64 __user *)(long)reg->addr;
608 
609 		return put_user(v, uaddr64);
610 	} else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U32) {
611 		u32 __user *uaddr32 = (u32 __user *)(long)reg->addr;
612 		u32 v32 = (u32)v;
613 
614 		return put_user(v32, uaddr32);
615 	} else {
616 		return -EINVAL;
617 	}
618 }
619 
620 static int kvm_mips_set_reg(struct kvm_vcpu *vcpu,
621 			    const struct kvm_one_reg *reg)
622 {
623 	struct mips_coproc *cop0 = vcpu->arch.cop0;
624 	u64 v;
625 
626 	if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64) {
627 		u64 __user *uaddr64 = (u64 __user *)(long)reg->addr;
628 
629 		if (get_user(v, uaddr64) != 0)
630 			return -EFAULT;
631 	} else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U32) {
632 		u32 __user *uaddr32 = (u32 __user *)(long)reg->addr;
633 		s32 v32;
634 
635 		if (get_user(v32, uaddr32) != 0)
636 			return -EFAULT;
637 		v = (s64)v32;
638 	} else {
639 		return -EINVAL;
640 	}
641 
642 	switch (reg->id) {
643 	case KVM_REG_MIPS_R0:
644 		/* Silently ignore requests to set $0 */
645 		break;
646 	case KVM_REG_MIPS_R1 ... KVM_REG_MIPS_R31:
647 		vcpu->arch.gprs[reg->id - KVM_REG_MIPS_R0] = v;
648 		break;
649 	case KVM_REG_MIPS_HI:
650 		vcpu->arch.hi = v;
651 		break;
652 	case KVM_REG_MIPS_LO:
653 		vcpu->arch.lo = v;
654 		break;
655 	case KVM_REG_MIPS_PC:
656 		vcpu->arch.pc = v;
657 		break;
658 
659 	case KVM_REG_MIPS_CP0_INDEX:
660 		kvm_write_c0_guest_index(cop0, v);
661 		break;
662 	case KVM_REG_MIPS_CP0_CONTEXT:
663 		kvm_write_c0_guest_context(cop0, v);
664 		break;
665 	case KVM_REG_MIPS_CP0_USERLOCAL:
666 		kvm_write_c0_guest_userlocal(cop0, v);
667 		break;
668 	case KVM_REG_MIPS_CP0_PAGEMASK:
669 		kvm_write_c0_guest_pagemask(cop0, v);
670 		break;
671 	case KVM_REG_MIPS_CP0_WIRED:
672 		kvm_write_c0_guest_wired(cop0, v);
673 		break;
674 	case KVM_REG_MIPS_CP0_HWRENA:
675 		kvm_write_c0_guest_hwrena(cop0, v);
676 		break;
677 	case KVM_REG_MIPS_CP0_BADVADDR:
678 		kvm_write_c0_guest_badvaddr(cop0, v);
679 		break;
680 	case KVM_REG_MIPS_CP0_ENTRYHI:
681 		kvm_write_c0_guest_entryhi(cop0, v);
682 		break;
683 	case KVM_REG_MIPS_CP0_STATUS:
684 		kvm_write_c0_guest_status(cop0, v);
685 		break;
686 	case KVM_REG_MIPS_CP0_EPC:
687 		kvm_write_c0_guest_epc(cop0, v);
688 		break;
689 	case KVM_REG_MIPS_CP0_ERROREPC:
690 		kvm_write_c0_guest_errorepc(cop0, v);
691 		break;
692 	/* registers to be handled specially */
693 	case KVM_REG_MIPS_CP0_COUNT:
694 	case KVM_REG_MIPS_CP0_COMPARE:
695 	case KVM_REG_MIPS_CP0_CAUSE:
696 	case KVM_REG_MIPS_COUNT_CTL:
697 	case KVM_REG_MIPS_COUNT_RESUME:
698 	case KVM_REG_MIPS_COUNT_HZ:
699 		return kvm_mips_callbacks->set_one_reg(vcpu, reg, v);
700 	default:
701 		return -EINVAL;
702 	}
703 	return 0;
704 }
705 
706 long kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl,
707 			 unsigned long arg)
708 {
709 	struct kvm_vcpu *vcpu = filp->private_data;
710 	void __user *argp = (void __user *)arg;
711 	long r;
712 
713 	switch (ioctl) {
714 	case KVM_SET_ONE_REG:
715 	case KVM_GET_ONE_REG: {
716 		struct kvm_one_reg reg;
717 
718 		if (copy_from_user(&reg, argp, sizeof(reg)))
719 			return -EFAULT;
720 		if (ioctl == KVM_SET_ONE_REG)
721 			return kvm_mips_set_reg(vcpu, &reg);
722 		else
723 			return kvm_mips_get_reg(vcpu, &reg);
724 	}
725 	case KVM_GET_REG_LIST: {
726 		struct kvm_reg_list __user *user_list = argp;
727 		u64 __user *reg_dest;
728 		struct kvm_reg_list reg_list;
729 		unsigned n;
730 
731 		if (copy_from_user(&reg_list, user_list, sizeof(reg_list)))
732 			return -EFAULT;
733 		n = reg_list.n;
734 		reg_list.n = ARRAY_SIZE(kvm_mips_get_one_regs);
735 		if (copy_to_user(user_list, &reg_list, sizeof(reg_list)))
736 			return -EFAULT;
737 		if (n < reg_list.n)
738 			return -E2BIG;
739 		reg_dest = user_list->reg;
740 		if (copy_to_user(reg_dest, kvm_mips_get_one_regs,
741 				 sizeof(kvm_mips_get_one_regs)))
742 			return -EFAULT;
743 		return 0;
744 	}
745 	case KVM_NMI:
746 		/* Treat the NMI as a CPU reset */
747 		r = kvm_mips_reset_vcpu(vcpu);
748 		break;
749 	case KVM_INTERRUPT:
750 		{
751 			struct kvm_mips_interrupt irq;
752 
753 			r = -EFAULT;
754 			if (copy_from_user(&irq, argp, sizeof(irq)))
755 				goto out;
756 
757 			kvm_debug("[%d] %s: irq: %d\n", vcpu->vcpu_id, __func__,
758 				  irq.irq);
759 
760 			r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
761 			break;
762 		}
763 	default:
764 		r = -ENOIOCTLCMD;
765 	}
766 
767 out:
768 	return r;
769 }
770 
771 /* Get (and clear) the dirty memory log for a memory slot. */
772 int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
773 {
774 	struct kvm_memory_slot *memslot;
775 	unsigned long ga, ga_end;
776 	int is_dirty = 0;
777 	int r;
778 	unsigned long n;
779 
780 	mutex_lock(&kvm->slots_lock);
781 
782 	r = kvm_get_dirty_log(kvm, log, &is_dirty);
783 	if (r)
784 		goto out;
785 
786 	/* If nothing is dirty, don't bother messing with page tables. */
787 	if (is_dirty) {
788 		memslot = &kvm->memslots->memslots[log->slot];
789 
790 		ga = memslot->base_gfn << PAGE_SHIFT;
791 		ga_end = ga + (memslot->npages << PAGE_SHIFT);
792 
793 		kvm_info("%s: dirty, ga: %#lx, ga_end %#lx\n", __func__, ga,
794 			 ga_end);
795 
796 		n = kvm_dirty_bitmap_bytes(memslot);
797 		memset(memslot->dirty_bitmap, 0, n);
798 	}
799 
800 	r = 0;
801 out:
802 	mutex_unlock(&kvm->slots_lock);
803 	return r;
804 
805 }
806 
807 long kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
808 {
809 	long r;
810 
811 	switch (ioctl) {
812 	default:
813 		r = -ENOIOCTLCMD;
814 	}
815 
816 	return r;
817 }
818 
819 int kvm_arch_init(void *opaque)
820 {
821 	if (kvm_mips_callbacks) {
822 		kvm_err("kvm: module already exists\n");
823 		return -EEXIST;
824 	}
825 
826 	return kvm_mips_emulation_init(&kvm_mips_callbacks);
827 }
828 
829 void kvm_arch_exit(void)
830 {
831 	kvm_mips_callbacks = NULL;
832 }
833 
834 int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
835 				  struct kvm_sregs *sregs)
836 {
837 	return -ENOIOCTLCMD;
838 }
839 
840 int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
841 				  struct kvm_sregs *sregs)
842 {
843 	return -ENOIOCTLCMD;
844 }
845 
846 void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
847 {
848 }
849 
850 int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
851 {
852 	return -ENOIOCTLCMD;
853 }
854 
855 int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
856 {
857 	return -ENOIOCTLCMD;
858 }
859 
860 int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
861 {
862 	return VM_FAULT_SIGBUS;
863 }
864 
865 int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
866 {
867 	int r;
868 
869 	switch (ext) {
870 	case KVM_CAP_ONE_REG:
871 		r = 1;
872 		break;
873 	case KVM_CAP_COALESCED_MMIO:
874 		r = KVM_COALESCED_MMIO_PAGE_OFFSET;
875 		break;
876 	default:
877 		r = 0;
878 		break;
879 	}
880 	return r;
881 }
882 
883 int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
884 {
885 	return kvm_mips_pending_timer(vcpu);
886 }
887 
888 int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu)
889 {
890 	int i;
891 	struct mips_coproc *cop0;
892 
893 	if (!vcpu)
894 		return -1;
895 
896 	kvm_debug("VCPU Register Dump:\n");
897 	kvm_debug("\tpc = 0x%08lx\n", vcpu->arch.pc);
898 	kvm_debug("\texceptions: %08lx\n", vcpu->arch.pending_exceptions);
899 
900 	for (i = 0; i < 32; i += 4) {
901 		kvm_debug("\tgpr%02d: %08lx %08lx %08lx %08lx\n", i,
902 		       vcpu->arch.gprs[i],
903 		       vcpu->arch.gprs[i + 1],
904 		       vcpu->arch.gprs[i + 2], vcpu->arch.gprs[i + 3]);
905 	}
906 	kvm_debug("\thi: 0x%08lx\n", vcpu->arch.hi);
907 	kvm_debug("\tlo: 0x%08lx\n", vcpu->arch.lo);
908 
909 	cop0 = vcpu->arch.cop0;
910 	kvm_debug("\tStatus: 0x%08lx, Cause: 0x%08lx\n",
911 		  kvm_read_c0_guest_status(cop0),
912 		  kvm_read_c0_guest_cause(cop0));
913 
914 	kvm_debug("\tEPC: 0x%08lx\n", kvm_read_c0_guest_epc(cop0));
915 
916 	return 0;
917 }
918 
919 int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
920 {
921 	int i;
922 
923 	for (i = 1; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
924 		vcpu->arch.gprs[i] = regs->gpr[i];
925 	vcpu->arch.gprs[0] = 0; /* zero is special, and cannot be set. */
926 	vcpu->arch.hi = regs->hi;
927 	vcpu->arch.lo = regs->lo;
928 	vcpu->arch.pc = regs->pc;
929 
930 	return 0;
931 }
932 
933 int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
934 {
935 	int i;
936 
937 	for (i = 0; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
938 		regs->gpr[i] = vcpu->arch.gprs[i];
939 
940 	regs->hi = vcpu->arch.hi;
941 	regs->lo = vcpu->arch.lo;
942 	regs->pc = vcpu->arch.pc;
943 
944 	return 0;
945 }
946 
947 static void kvm_mips_comparecount_func(unsigned long data)
948 {
949 	struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;
950 
951 	kvm_mips_callbacks->queue_timer_int(vcpu);
952 
953 	vcpu->arch.wait = 0;
954 	if (waitqueue_active(&vcpu->wq))
955 		wake_up_interruptible(&vcpu->wq);
956 }
957 
958 /* low level hrtimer wake routine */
959 static enum hrtimer_restart kvm_mips_comparecount_wakeup(struct hrtimer *timer)
960 {
961 	struct kvm_vcpu *vcpu;
962 
963 	vcpu = container_of(timer, struct kvm_vcpu, arch.comparecount_timer);
964 	kvm_mips_comparecount_func((unsigned long) vcpu);
965 	return kvm_mips_count_timeout(vcpu);
966 }
967 
968 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
969 {
970 	kvm_mips_callbacks->vcpu_init(vcpu);
971 	hrtimer_init(&vcpu->arch.comparecount_timer, CLOCK_MONOTONIC,
972 		     HRTIMER_MODE_REL);
973 	vcpu->arch.comparecount_timer.function = kvm_mips_comparecount_wakeup;
974 	return 0;
975 }
976 
977 int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
978 				  struct kvm_translation *tr)
979 {
980 	return 0;
981 }
982 
983 /* Initial guest state */
984 int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
985 {
986 	return kvm_mips_callbacks->vcpu_setup(vcpu);
987 }
988 
989 static void kvm_mips_set_c0_status(void)
990 {
991 	uint32_t status = read_c0_status();
992 
993 	if (cpu_has_dsp)
994 		status |= (ST0_MX);
995 
996 	write_c0_status(status);
997 	ehb();
998 }
999 
1000 /*
1001  * Return value is in the form (errcode<<2 | RESUME_FLAG_HOST | RESUME_FLAG_NV)
1002  */
1003 int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
1004 {
1005 	uint32_t cause = vcpu->arch.host_cp0_cause;
1006 	uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
1007 	uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
1008 	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
1009 	enum emulation_result er = EMULATE_DONE;
1010 	int ret = RESUME_GUEST;
1011 
1012 	/* re-enable HTW before enabling interrupts */
1013 	htw_start();
1014 
1015 	/* Set a default exit reason */
1016 	run->exit_reason = KVM_EXIT_UNKNOWN;
1017 	run->ready_for_interrupt_injection = 1;
1018 
1019 	/*
1020 	 * Set the appropriate status bits based on host CPU features,
1021 	 * before we hit the scheduler
1022 	 */
1023 	kvm_mips_set_c0_status();
1024 
1025 	local_irq_enable();
1026 
1027 	kvm_debug("kvm_mips_handle_exit: cause: %#x, PC: %p, kvm_run: %p, kvm_vcpu: %p\n",
1028 			cause, opc, run, vcpu);
1029 
1030 	/*
1031 	 * Do a privilege check; if in user mode (UM) most of these exit
1032 	 * conditions end up causing an exception to be delivered to the Guest Kernel
1033 	 */
1034 	er = kvm_mips_check_privilege(cause, opc, run, vcpu);
1035 	if (er == EMULATE_PRIV_FAIL) {
1036 		goto skip_emul;
1037 	} else if (er == EMULATE_FAIL) {
1038 		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
1039 		ret = RESUME_HOST;
1040 		goto skip_emul;
1041 	}
1042 
1043 	switch (exccode) {
1044 	case T_INT:
1045 		kvm_debug("[%d]T_INT @ %p\n", vcpu->vcpu_id, opc);
1046 
1047 		++vcpu->stat.int_exits;
1048 		trace_kvm_exit(vcpu, INT_EXITS);
1049 
1050 		if (need_resched())
1051 			cond_resched();
1052 
1053 		ret = RESUME_GUEST;
1054 		break;
1055 
1056 	case T_COP_UNUSABLE:
1057 		kvm_debug("T_COP_UNUSABLE: @ PC: %p\n", opc);
1058 
1059 		++vcpu->stat.cop_unusable_exits;
1060 		trace_kvm_exit(vcpu, COP_UNUSABLE_EXITS);
1061 		ret = kvm_mips_callbacks->handle_cop_unusable(vcpu);
1062 		/* XXXKYMA: Might need to return to user space */
1063 		if (run->exit_reason == KVM_EXIT_IRQ_WINDOW_OPEN)
1064 			ret = RESUME_HOST;
1065 		break;
1066 
1067 	case T_TLB_MOD:
1068 		++vcpu->stat.tlbmod_exits;
1069 		trace_kvm_exit(vcpu, TLBMOD_EXITS);
1070 		ret = kvm_mips_callbacks->handle_tlb_mod(vcpu);
1071 		break;
1072 
1073 	case T_TLB_ST_MISS:
1074 		kvm_debug("TLB ST fault:  cause %#x, status %#lx, PC: %p, BadVaddr: %#lx\n",
1075 			  cause, kvm_read_c0_guest_status(vcpu->arch.cop0), opc,
1076 			  badvaddr);
1077 
1078 		++vcpu->stat.tlbmiss_st_exits;
1079 		trace_kvm_exit(vcpu, TLBMISS_ST_EXITS);
1080 		ret = kvm_mips_callbacks->handle_tlb_st_miss(vcpu);
1081 		break;
1082 
1083 	case T_TLB_LD_MISS:
1084 		kvm_debug("TLB LD fault: cause %#x, PC: %p, BadVaddr: %#lx\n",
1085 			  cause, opc, badvaddr);
1086 
1087 		++vcpu->stat.tlbmiss_ld_exits;
1088 		trace_kvm_exit(vcpu, TLBMISS_LD_EXITS);
1089 		ret = kvm_mips_callbacks->handle_tlb_ld_miss(vcpu);
1090 		break;
1091 
1092 	case T_ADDR_ERR_ST:
1093 		++vcpu->stat.addrerr_st_exits;
1094 		trace_kvm_exit(vcpu, ADDRERR_ST_EXITS);
1095 		ret = kvm_mips_callbacks->handle_addr_err_st(vcpu);
1096 		break;
1097 
1098 	case T_ADDR_ERR_LD:
1099 		++vcpu->stat.addrerr_ld_exits;
1100 		trace_kvm_exit(vcpu, ADDRERR_LD_EXITS);
1101 		ret = kvm_mips_callbacks->handle_addr_err_ld(vcpu);
1102 		break;
1103 
1104 	case T_SYSCALL:
1105 		++vcpu->stat.syscall_exits;
1106 		trace_kvm_exit(vcpu, SYSCALL_EXITS);
1107 		ret = kvm_mips_callbacks->handle_syscall(vcpu);
1108 		break;
1109 
1110 	case T_RES_INST:
1111 		++vcpu->stat.resvd_inst_exits;
1112 		trace_kvm_exit(vcpu, RESVD_INST_EXITS);
1113 		ret = kvm_mips_callbacks->handle_res_inst(vcpu);
1114 		break;
1115 
1116 	case T_BREAK:
1117 		++vcpu->stat.break_inst_exits;
1118 		trace_kvm_exit(vcpu, BREAK_INST_EXITS);
1119 		ret = kvm_mips_callbacks->handle_break(vcpu);
1120 		break;
1121 
1122 	default:
1123 		kvm_err("Exception Code: %d, not yet handled, @ PC: %p, inst: 0x%08x  BadVaddr: %#lx Status: %#lx\n",
1124 			exccode, opc, kvm_get_inst(opc, vcpu), badvaddr,
1125 			kvm_read_c0_guest_status(vcpu->arch.cop0));
1126 		kvm_arch_vcpu_dump_regs(vcpu);
1127 		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
1128 		ret = RESUME_HOST;
1129 		break;
1130 
1131 	}
1132 
1133 skip_emul:
1134 	local_irq_disable();
1135 
1136 	if (er == EMULATE_DONE && !(ret & RESUME_HOST))
1137 		kvm_mips_deliver_interrupts(vcpu, cause);
1138 
1139 	if (!(ret & RESUME_HOST)) {
1140 		/* Only check for signals if not already exiting to userspace */
1141 		if (signal_pending(current)) {
1142 			run->exit_reason = KVM_EXIT_INTR;
1143 			ret = (-EINTR << 2) | RESUME_HOST;
1144 			++vcpu->stat.signal_exits;
1145 			trace_kvm_exit(vcpu, SIGNAL_EXITS);
1146 		}
1147 	}
1148 
1149 	/* Disable HTW before returning to guest or host */
1150 	htw_stop();
1151 
1152 	return ret;
1153 }
1154 
1155 int __init kvm_mips_init(void)
1156 {
1157 	int ret;
1158 
1159 	ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
1160 
1161 	if (ret)
1162 		return ret;
1163 
1164 	/*
1165 	 * On MIPS, kernel modules are executed from "mapped space", which
1166 	 * requires TLBs. The TLB handling code is statically linked with
1167 	 * the rest of the kernel (tlb.c) to avoid the possibility of
1168 	 * double faulting. The issue is that the TLB code references
1169 	 * routines that are part of the KVM module, which are only
1170 	 * available once the module is loaded.
1171 	 */
1172 	kvm_mips_gfn_to_pfn = gfn_to_pfn;
1173 	kvm_mips_release_pfn_clean = kvm_release_pfn_clean;
1174 	kvm_mips_is_error_pfn = is_error_pfn;
1175 
1176 	pr_info("KVM/MIPS Initialized\n");
1177 	return 0;
1178 }
1179 
1180 void __exit kvm_mips_exit(void)
1181 {
1182 	kvm_exit();
1183 
1184 	kvm_mips_gfn_to_pfn = NULL;
1185 	kvm_mips_release_pfn_clean = NULL;
1186 	kvm_mips_is_error_pfn = NULL;
1187 
1188 	pr_info("KVM/MIPS unloaded\n");
1189 }
1190 
1191 module_init(kvm_mips_init);
1192 module_exit(kvm_mips_exit);
1193 
1194 EXPORT_TRACEPOINT_SYMBOL(kvm_exit);
1195