/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS: MIPS specific KVM APIs
 *
 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <linux/bootmem.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>

#include <linux/kvm_host.h>

#include "interrupt.h"
#include "commpage.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

#ifndef VECTORSPACING
#define VECTORSPACING 0x100     /* for EI/VI mode */
#endif

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x)
struct kvm_stats_debugfs_item debugfs_entries[] = {
        { "wait", VCPU_STAT(wait_exits), KVM_STAT_VCPU },
        { "cache", VCPU_STAT(cache_exits), KVM_STAT_VCPU },
        { "signal", VCPU_STAT(signal_exits), KVM_STAT_VCPU },
        { "interrupt", VCPU_STAT(int_exits), KVM_STAT_VCPU },
        { "cop_unusable", VCPU_STAT(cop_unusable_exits), KVM_STAT_VCPU },
        { "tlbmod", VCPU_STAT(tlbmod_exits), KVM_STAT_VCPU },
        { "tlbmiss_ld", VCPU_STAT(tlbmiss_ld_exits), KVM_STAT_VCPU },
        { "tlbmiss_st", VCPU_STAT(tlbmiss_st_exits), KVM_STAT_VCPU },
        { "addrerr_st", VCPU_STAT(addrerr_st_exits), KVM_STAT_VCPU },
        { "addrerr_ld", VCPU_STAT(addrerr_ld_exits), KVM_STAT_VCPU },
        { "syscall", VCPU_STAT(syscall_exits), KVM_STAT_VCPU },
        { "resvd_inst", VCPU_STAT(resvd_inst_exits), KVM_STAT_VCPU },
        { "break_inst", VCPU_STAT(break_inst_exits), KVM_STAT_VCPU },
        { "flush_dcache", VCPU_STAT(flush_dcache_exits), KVM_STAT_VCPU },
        { "halt_wakeup", VCPU_STAT(halt_wakeup), KVM_STAT_VCPU },
        {NULL}
};

static int kvm_mips_reset_vcpu(struct kvm_vcpu *vcpu)
{
        int i;

        for_each_possible_cpu(i) {
                vcpu->arch.guest_kernel_asid[i] = 0;
                vcpu->arch.guest_user_asid[i] = 0;
        }

        return 0;
}

/*
 * XXXKYMA: We are simulating a processor that has the WII bit set in
 * Config7, so we are "runnable" if interrupts are pending
 */
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
        return !!(vcpu->arch.pending_exceptions);
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
        return 1;
}

int kvm_arch_hardware_enable(void)
{
        return 0;
}

int kvm_arch_hardware_setup(void)
{
        return 0;
}

void kvm_arch_check_processor_compat(void *rtn)
{
        *(int *)rtn = 0;
}

static void kvm_mips_init_tlbs(struct kvm *kvm)
{
        unsigned long wired;

        /*
         * Add a wired entry to the TLB, it is used to map the commpage to
         * the Guest kernel
         */
        wired = read_c0_wired();
        write_c0_wired(wired + 1);
        mtc0_tlbw_hazard();
        kvm->arch.commpage_tlb = wired;

        kvm_debug("[%d] commpage TLB: %d\n", smp_processor_id(),
                  kvm->arch.commpage_tlb);
}

static void kvm_mips_init_vm_percpu(void *arg)
{
        struct kvm *kvm = (struct kvm *)arg;

        kvm_mips_init_tlbs(kvm);
        kvm_mips_callbacks->vm_init(kvm);
}

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
        if (atomic_inc_return(&kvm_mips_instance) == 1) {
                kvm_debug("%s: 1st KVM instance, setup host TLB parameters\n",
                          __func__);
                on_each_cpu(kvm_mips_init_vm_percpu, kvm, 1);
        }

        return 0;
}

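/*
 * For context: userspace reaches kvm_arch_init_vm() and, later,
 * kvm_arch_vcpu_create() through the standard /dev/kvm ioctl sequence.
 * A minimal sketch (illustrative only; error handling is elided and the
 * fd names are arbitrary):
 *
 *      int kvm_fd  = open("/dev/kvm", O_RDWR);
 *      int vm_fd   = ioctl(kvm_fd, KVM_CREATE_VM, 0);   // kvm_arch_init_vm()
 *      int vcpu_fd = ioctl(vm_fd, KVM_CREATE_VCPU, 0);  // kvm_arch_vcpu_create()
 */
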
void kvm_mips_free_vcpus(struct kvm *kvm)
{
        unsigned int i;
        struct kvm_vcpu *vcpu;

        /* Put the pages we reserved for the guest pmap */
        for (i = 0; i < kvm->arch.guest_pmap_npages; i++) {
                if (kvm->arch.guest_pmap[i] != KVM_INVALID_PAGE)
                        kvm_mips_release_pfn_clean(kvm->arch.guest_pmap[i]);
        }
        kfree(kvm->arch.guest_pmap);

        kvm_for_each_vcpu(i, vcpu, kvm) {
                kvm_arch_vcpu_free(vcpu);
        }

        mutex_lock(&kvm->lock);

        for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
                kvm->vcpus[i] = NULL;

        atomic_set(&kvm->online_vcpus, 0);

        mutex_unlock(&kvm->lock);
}

static void kvm_mips_uninit_tlbs(void *arg)
{
        /* Restore wired count */
        write_c0_wired(0);
        mtc0_tlbw_hazard();
        /* Clear out all the TLBs */
        kvm_local_flush_tlb_all();
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
        kvm_mips_free_vcpus(kvm);

        /* If this is the last instance, restore wired count */
        if (atomic_dec_return(&kvm_mips_instance) == 0) {
                kvm_debug("%s: last KVM instance, restoring TLB parameters\n",
                          __func__);
                on_each_cpu(kvm_mips_uninit_tlbs, NULL, 1);
        }
}

long kvm_arch_dev_ioctl(struct file *filp, unsigned int ioctl,
                        unsigned long arg)
{
        return -ENOIOCTLCMD;
}

int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
                            unsigned long npages)
{
        return 0;
}

int kvm_arch_prepare_memory_region(struct kvm *kvm,
                                   struct kvm_memory_slot *memslot,
                                   struct kvm_userspace_memory_region *mem,
                                   enum kvm_mr_change change)
{
        return 0;
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
                                   struct kvm_userspace_memory_region *mem,
                                   const struct kvm_memory_slot *old,
                                   enum kvm_mr_change change)
{
        unsigned long npages = 0;
        int i;

        kvm_debug("%s: kvm: %p slot: %d, GPA: %llx, size: %llx, QVA: %llx\n",
                  __func__, kvm, mem->slot, mem->guest_phys_addr,
                  mem->memory_size, mem->userspace_addr);

        /* Setup Guest PMAP table */
        if (!kvm->arch.guest_pmap) {
                if (mem->slot == 0)
                        npages = mem->memory_size >> PAGE_SHIFT;

                if (npages) {
                        kvm->arch.guest_pmap_npages = npages;
                        kvm->arch.guest_pmap =
                            kzalloc(npages * sizeof(unsigned long), GFP_KERNEL);

                        if (!kvm->arch.guest_pmap) {
                                kvm_err("Failed to allocate guest PMAP");
                                return;
                        }

                        kvm_debug("Allocated space for Guest PMAP Table (%ld pages) @ %p\n",
                                  npages, kvm->arch.guest_pmap);

                        /* Now setup the page table */
                        for (i = 0; i < npages; i++)
                                kvm->arch.guest_pmap[i] = KVM_INVALID_PAGE;
                }
        }
}

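/*
 * The guest PMAP above is sized from memslot 0, which userspace registers
 * via KVM_SET_USER_MEMORY_REGION on the VM fd. A minimal sketch
 * (illustrative only; "guest_mem" is assumed to be a page-aligned
 * mmap()ed buffer of mem_size bytes):
 *
 *      struct kvm_userspace_memory_region region = {
 *              .slot            = 0,
 *              .guest_phys_addr = 0,
 *              .memory_size     = mem_size,
 *              .userspace_addr  = (__u64)guest_mem,
 *      };
 *      ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region);
 */
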
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
{
        int err, size, offset;
        void *gebase;
        int i;

        struct kvm_vcpu *vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);

        if (!vcpu) {
                err = -ENOMEM;
                goto out;
        }

        err = kvm_vcpu_init(vcpu, kvm, id);

        if (err)
                goto out_free_cpu;

        kvm_debug("kvm @ %p: create cpu %d at %p\n", kvm, id, vcpu);

        /*
         * Allocate space for host mode exception handlers that handle
         * guest mode exits
         */
        if (cpu_has_veic || cpu_has_vint)
                size = 0x200 + VECTORSPACING * 64;
        else
                size = 0x4000;

        /* Save Linux EBASE */
        vcpu->arch.host_ebase = (void *)read_c0_ebase();

        gebase = kzalloc(ALIGN(size, PAGE_SIZE), GFP_KERNEL);

        if (!gebase) {
                err = -ENOMEM;
                goto out_free_cpu;
        }
        kvm_debug("Allocated %d bytes for KVM Exception Handlers @ %p\n",
                  ALIGN(size, PAGE_SIZE), gebase);

        /* Save new ebase */
        vcpu->arch.guest_ebase = gebase;

        /* Copy L1 Guest Exception handler to correct offset */

        /* TLB Refill, EXL = 0 */
        memcpy(gebase, mips32_exception,
               mips32_exceptionEnd - mips32_exception);

        /* General Exception Entry point */
        memcpy(gebase + 0x180, mips32_exception,
               mips32_exceptionEnd - mips32_exception);

        /* For vectored interrupts poke the exception code @ all offsets 0-7 */
        for (i = 0; i < 8; i++) {
                kvm_debug("L1 Vectored handler @ %p\n",
                          gebase + 0x200 + (i * VECTORSPACING));
                memcpy(gebase + 0x200 + (i * VECTORSPACING), mips32_exception,
                       mips32_exceptionEnd - mips32_exception);
        }

        /* General handler, relocate to unmapped space for sanity's sake */
        offset = 0x2000;
        kvm_debug("Installing KVM Exception handlers @ %p, %#x bytes\n",
                  gebase + offset,
                  mips32_GuestExceptionEnd - mips32_GuestException);

        memcpy(gebase + offset, mips32_GuestException,
               mips32_GuestExceptionEnd - mips32_GuestException);

        /* Invalidate the icache for these ranges */
        local_flush_icache_range((unsigned long)gebase,
                                 (unsigned long)gebase + ALIGN(size, PAGE_SIZE));

        /*
         * Allocate comm page for guest kernel, a TLB will be reserved for
         * mapping GVA @ 0xFFFF8000 to this page
         */
        vcpu->arch.kseg0_commpage = kzalloc(PAGE_SIZE << 1, GFP_KERNEL);

        if (!vcpu->arch.kseg0_commpage) {
                err = -ENOMEM;
                goto out_free_gebase;
        }

        kvm_debug("Allocated COMM page @ %p\n", vcpu->arch.kseg0_commpage);
        kvm_mips_commpage_init(vcpu);

        /* Init */
        vcpu->arch.last_sched_cpu = -1;

        /* Start off the timer */
        kvm_mips_init_count(vcpu);

        return vcpu;

out_free_gebase:
        kfree(gebase);

out_free_cpu:
        kfree(vcpu);

out:
        return ERR_PTR(err);
}

void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
{
        hrtimer_cancel(&vcpu->arch.comparecount_timer);

        kvm_vcpu_uninit(vcpu);

        kvm_mips_dump_stats(vcpu);

        kfree(vcpu->arch.guest_ebase);
        kfree(vcpu->arch.kseg0_commpage);
        kfree(vcpu);
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
        kvm_arch_vcpu_free(vcpu);
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
                                        struct kvm_guest_debug *dbg)
{
        return -ENOIOCTLCMD;
}

int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
        int r = 0;
        sigset_t sigsaved;

        if (vcpu->sigset_active)
                sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

        if (vcpu->mmio_needed) {
                if (!vcpu->mmio_is_write)
                        kvm_mips_complete_mmio_load(vcpu, run);
                vcpu->mmio_needed = 0;
        }

        local_irq_disable();
        /* Check if we have any exceptions/interrupts pending */
        kvm_mips_deliver_interrupts(vcpu,
                                    kvm_read_c0_guest_cause(vcpu->arch.cop0));

        kvm_guest_enter();

        r = __kvm_mips_vcpu_run(run, vcpu);

        kvm_guest_exit();
        local_irq_enable();

        if (vcpu->sigset_active)
                sigprocmask(SIG_SETMASK, &sigsaved, NULL);

        return r;
}

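/*
 * kvm_arch_vcpu_ioctl_run() is entered via KVM_RUN on the vcpu fd, with
 * the shared struct kvm_run mapped beforehand. A minimal sketch of the
 * usual userspace loop (illustrative only; run_size comes from
 * KVM_GET_VCPU_MMAP_SIZE, and the MMIO emulation itself is elided):
 *
 *      struct kvm_run *run = mmap(NULL, run_size, PROT_READ | PROT_WRITE,
 *                                 MAP_SHARED, vcpu_fd, 0);
 *      for (;;) {
 *              ioctl(vcpu_fd, KVM_RUN, 0);
 *              switch (run->exit_reason) {
 *              case KVM_EXIT_MMIO:
 *                      // emulate the access; a completed load is consumed
 *                      // above via kvm_mips_complete_mmio_load()
 *                      break;
 *              case KVM_EXIT_INTR:
 *                      break;  // interrupted by a signal; just rerun
 *              }
 *      }
 */
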
int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
                             struct kvm_mips_interrupt *irq)
{
        int intr = (int)irq->irq;
        struct kvm_vcpu *dvcpu = NULL;

        if (intr == 3 || intr == -3 || intr == 4 || intr == -4)
                kvm_debug("%s: CPU: %d, INTR: %d\n", __func__, irq->cpu,
                          (int)intr);

        if (irq->cpu == -1)
                dvcpu = vcpu;
        else
                dvcpu = vcpu->kvm->vcpus[irq->cpu];

        if (intr == 2 || intr == 3 || intr == 4) {
                kvm_mips_callbacks->queue_io_int(dvcpu, irq);

        } else if (intr == -2 || intr == -3 || intr == -4) {
                kvm_mips_callbacks->dequeue_io_int(dvcpu, irq);
        } else {
                kvm_err("%s: invalid interrupt ioctl (%d:%d)\n", __func__,
                        irq->cpu, irq->irq);
                return -EINVAL;
        }

        dvcpu->arch.wait = 0;

        if (waitqueue_active(&dvcpu->wq))
                wake_up_interruptible(&dvcpu->wq);

        return 0;
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
        return -ENOIOCTLCMD;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
        return -ENOIOCTLCMD;
}

static u64 kvm_mips_get_one_regs[] = {
        KVM_REG_MIPS_R0,
        KVM_REG_MIPS_R1,
        KVM_REG_MIPS_R2,
        KVM_REG_MIPS_R3,
        KVM_REG_MIPS_R4,
        KVM_REG_MIPS_R5,
        KVM_REG_MIPS_R6,
        KVM_REG_MIPS_R7,
        KVM_REG_MIPS_R8,
        KVM_REG_MIPS_R9,
        KVM_REG_MIPS_R10,
        KVM_REG_MIPS_R11,
        KVM_REG_MIPS_R12,
        KVM_REG_MIPS_R13,
        KVM_REG_MIPS_R14,
        KVM_REG_MIPS_R15,
        KVM_REG_MIPS_R16,
        KVM_REG_MIPS_R17,
        KVM_REG_MIPS_R18,
        KVM_REG_MIPS_R19,
        KVM_REG_MIPS_R20,
        KVM_REG_MIPS_R21,
        KVM_REG_MIPS_R22,
        KVM_REG_MIPS_R23,
        KVM_REG_MIPS_R24,
        KVM_REG_MIPS_R25,
        KVM_REG_MIPS_R26,
        KVM_REG_MIPS_R27,
        KVM_REG_MIPS_R28,
        KVM_REG_MIPS_R29,
        KVM_REG_MIPS_R30,
        KVM_REG_MIPS_R31,

        KVM_REG_MIPS_HI,
        KVM_REG_MIPS_LO,
        KVM_REG_MIPS_PC,

        KVM_REG_MIPS_CP0_INDEX,
        KVM_REG_MIPS_CP0_CONTEXT,
        KVM_REG_MIPS_CP0_USERLOCAL,
        KVM_REG_MIPS_CP0_PAGEMASK,
        KVM_REG_MIPS_CP0_WIRED,
        KVM_REG_MIPS_CP0_HWRENA,
        KVM_REG_MIPS_CP0_BADVADDR,
        KVM_REG_MIPS_CP0_COUNT,
        KVM_REG_MIPS_CP0_ENTRYHI,
        KVM_REG_MIPS_CP0_COMPARE,
        KVM_REG_MIPS_CP0_STATUS,
        KVM_REG_MIPS_CP0_CAUSE,
        KVM_REG_MIPS_CP0_EPC,
        KVM_REG_MIPS_CP0_CONFIG,
        KVM_REG_MIPS_CP0_CONFIG1,
        KVM_REG_MIPS_CP0_CONFIG2,
        KVM_REG_MIPS_CP0_CONFIG3,
        KVM_REG_MIPS_CP0_CONFIG7,
        KVM_REG_MIPS_CP0_ERROREPC,

        KVM_REG_MIPS_COUNT_CTL,
        KVM_REG_MIPS_COUNT_RESUME,
        KVM_REG_MIPS_COUNT_HZ,
};

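/*
 * This array is what KVM_GET_REG_LIST (see kvm_arch_vcpu_ioctl() below)
 * reports to userspace. The ioctl is typically called twice: once with
 * n = 0 to learn the count (the call fails with E2BIG but writes back the
 * real n), then again with a suitably sized buffer. A minimal sketch
 * (illustrative only; error handling elided):
 *
 *      struct kvm_reg_list probe = { .n = 0 }, *list;
 *
 *      ioctl(vcpu_fd, KVM_GET_REG_LIST, &probe);    // E2BIG, sets probe.n
 *      list = malloc(sizeof(*list) + probe.n * sizeof(__u64));
 *      list->n = probe.n;
 *      ioctl(vcpu_fd, KVM_GET_REG_LIST, list);
 */
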
static int kvm_mips_get_reg(struct kvm_vcpu *vcpu,
                            const struct kvm_one_reg *reg)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        int ret;
        s64 v;

        switch (reg->id) {
        case KVM_REG_MIPS_R0 ... KVM_REG_MIPS_R31:
                v = (long)vcpu->arch.gprs[reg->id - KVM_REG_MIPS_R0];
                break;
        case KVM_REG_MIPS_HI:
                v = (long)vcpu->arch.hi;
                break;
        case KVM_REG_MIPS_LO:
                v = (long)vcpu->arch.lo;
                break;
        case KVM_REG_MIPS_PC:
                v = (long)vcpu->arch.pc;
                break;

        case KVM_REG_MIPS_CP0_INDEX:
                v = (long)kvm_read_c0_guest_index(cop0);
                break;
        case KVM_REG_MIPS_CP0_CONTEXT:
                v = (long)kvm_read_c0_guest_context(cop0);
                break;
        case KVM_REG_MIPS_CP0_USERLOCAL:
                v = (long)kvm_read_c0_guest_userlocal(cop0);
                break;
        case KVM_REG_MIPS_CP0_PAGEMASK:
                v = (long)kvm_read_c0_guest_pagemask(cop0);
                break;
        case KVM_REG_MIPS_CP0_WIRED:
                v = (long)kvm_read_c0_guest_wired(cop0);
                break;
        case KVM_REG_MIPS_CP0_HWRENA:
                v = (long)kvm_read_c0_guest_hwrena(cop0);
                break;
        case KVM_REG_MIPS_CP0_BADVADDR:
                v = (long)kvm_read_c0_guest_badvaddr(cop0);
                break;
        case KVM_REG_MIPS_CP0_ENTRYHI:
                v = (long)kvm_read_c0_guest_entryhi(cop0);
                break;
        case KVM_REG_MIPS_CP0_COMPARE:
                v = (long)kvm_read_c0_guest_compare(cop0);
                break;
        case KVM_REG_MIPS_CP0_STATUS:
                v = (long)kvm_read_c0_guest_status(cop0);
                break;
        case KVM_REG_MIPS_CP0_CAUSE:
                v = (long)kvm_read_c0_guest_cause(cop0);
                break;
        case KVM_REG_MIPS_CP0_EPC:
                v = (long)kvm_read_c0_guest_epc(cop0);
                break;
        case KVM_REG_MIPS_CP0_ERROREPC:
                v = (long)kvm_read_c0_guest_errorepc(cop0);
                break;
        case KVM_REG_MIPS_CP0_CONFIG:
                v = (long)kvm_read_c0_guest_config(cop0);
                break;
        case KVM_REG_MIPS_CP0_CONFIG1:
                v = (long)kvm_read_c0_guest_config1(cop0);
                break;
        case KVM_REG_MIPS_CP0_CONFIG2:
                v = (long)kvm_read_c0_guest_config2(cop0);
                break;
        case KVM_REG_MIPS_CP0_CONFIG3:
                v = (long)kvm_read_c0_guest_config3(cop0);
                break;
        case KVM_REG_MIPS_CP0_CONFIG7:
                v = (long)kvm_read_c0_guest_config7(cop0);
                break;
        /* registers to be handled specially */
        case KVM_REG_MIPS_CP0_COUNT:
        case KVM_REG_MIPS_COUNT_CTL:
        case KVM_REG_MIPS_COUNT_RESUME:
        case KVM_REG_MIPS_COUNT_HZ:
                ret = kvm_mips_callbacks->get_one_reg(vcpu, reg, &v);
                if (ret)
                        return ret;
                break;
        default:
                return -EINVAL;
        }
        if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64) {
                u64 __user *uaddr64 = (u64 __user *)(long)reg->addr;

                return put_user(v, uaddr64);
        } else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U32) {
                u32 __user *uaddr32 = (u32 __user *)(long)reg->addr;
                u32 v32 = (u32)v;

                return put_user(v32, uaddr32);
        } else {
                return -EINVAL;
        }
}

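/*
 * From userspace, kvm_mips_get_reg() is reached through KVM_GET_ONE_REG
 * with a struct kvm_one_reg pointing at a local buffer. A minimal sketch
 * reading the guest PC (illustrative only):
 *
 *      __u64 pc;
 *      struct kvm_one_reg one = {
 *              .id   = KVM_REG_MIPS_PC,
 *              .addr = (__u64)&pc,
 *      };
 *
 *      ioctl(vcpu_fd, KVM_GET_ONE_REG, &one);
 */
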
static int kvm_mips_set_reg(struct kvm_vcpu *vcpu,
                            const struct kvm_one_reg *reg)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        u64 v;

        if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64) {
                u64 __user *uaddr64 = (u64 __user *)(long)reg->addr;

                if (get_user(v, uaddr64) != 0)
                        return -EFAULT;
        } else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U32) {
                u32 __user *uaddr32 = (u32 __user *)(long)reg->addr;
                s32 v32;

                if (get_user(v32, uaddr32) != 0)
                        return -EFAULT;
                v = (s64)v32;
        } else {
                return -EINVAL;
        }

        switch (reg->id) {
        case KVM_REG_MIPS_R0:
                /* Silently ignore requests to set $0 */
                break;
        case KVM_REG_MIPS_R1 ... KVM_REG_MIPS_R31:
                vcpu->arch.gprs[reg->id - KVM_REG_MIPS_R0] = v;
                break;
        case KVM_REG_MIPS_HI:
                vcpu->arch.hi = v;
                break;
        case KVM_REG_MIPS_LO:
                vcpu->arch.lo = v;
                break;
        case KVM_REG_MIPS_PC:
                vcpu->arch.pc = v;
                break;

        case KVM_REG_MIPS_CP0_INDEX:
                kvm_write_c0_guest_index(cop0, v);
                break;
        case KVM_REG_MIPS_CP0_CONTEXT:
                kvm_write_c0_guest_context(cop0, v);
                break;
        case KVM_REG_MIPS_CP0_USERLOCAL:
                kvm_write_c0_guest_userlocal(cop0, v);
                break;
        case KVM_REG_MIPS_CP0_PAGEMASK:
                kvm_write_c0_guest_pagemask(cop0, v);
                break;
        case KVM_REG_MIPS_CP0_WIRED:
                kvm_write_c0_guest_wired(cop0, v);
                break;
        case KVM_REG_MIPS_CP0_HWRENA:
                kvm_write_c0_guest_hwrena(cop0, v);
                break;
        case KVM_REG_MIPS_CP0_BADVADDR:
                kvm_write_c0_guest_badvaddr(cop0, v);
                break;
        case KVM_REG_MIPS_CP0_ENTRYHI:
                kvm_write_c0_guest_entryhi(cop0, v);
                break;
        case KVM_REG_MIPS_CP0_STATUS:
                kvm_write_c0_guest_status(cop0, v);
                break;
        case KVM_REG_MIPS_CP0_EPC:
                kvm_write_c0_guest_epc(cop0, v);
                break;
        case KVM_REG_MIPS_CP0_ERROREPC:
                kvm_write_c0_guest_errorepc(cop0, v);
                break;
        /* registers to be handled specially */
        case KVM_REG_MIPS_CP0_COUNT:
        case KVM_REG_MIPS_CP0_COMPARE:
        case KVM_REG_MIPS_CP0_CAUSE:
        case KVM_REG_MIPS_COUNT_CTL:
        case KVM_REG_MIPS_COUNT_RESUME:
        case KVM_REG_MIPS_COUNT_HZ:
                return kvm_mips_callbacks->set_one_reg(vcpu, reg, v);
        default:
                return -EINVAL;
        }
        return 0;
}

long kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl,
                         unsigned long arg)
{
        struct kvm_vcpu *vcpu = filp->private_data;
        void __user *argp = (void __user *)arg;
        long r;

        switch (ioctl) {
        case KVM_SET_ONE_REG:
        case KVM_GET_ONE_REG: {
                struct kvm_one_reg reg;

                if (copy_from_user(&reg, argp, sizeof(reg)))
                        return -EFAULT;
                if (ioctl == KVM_SET_ONE_REG)
                        return kvm_mips_set_reg(vcpu, &reg);
                else
                        return kvm_mips_get_reg(vcpu, &reg);
        }
        case KVM_GET_REG_LIST: {
                struct kvm_reg_list __user *user_list = argp;
                u64 __user *reg_dest;
                struct kvm_reg_list reg_list;
                unsigned n;

                if (copy_from_user(&reg_list, user_list, sizeof(reg_list)))
                        return -EFAULT;
                n = reg_list.n;
                reg_list.n = ARRAY_SIZE(kvm_mips_get_one_regs);
                if (copy_to_user(user_list, &reg_list, sizeof(reg_list)))
                        return -EFAULT;
                if (n < reg_list.n)
                        return -E2BIG;
                reg_dest = user_list->reg;
                if (copy_to_user(reg_dest, kvm_mips_get_one_regs,
                                 sizeof(kvm_mips_get_one_regs)))
                        return -EFAULT;
                return 0;
        }
        case KVM_NMI:
                /* Treat the NMI as a CPU reset */
                r = kvm_mips_reset_vcpu(vcpu);
                break;
        case KVM_INTERRUPT:
                {
                        struct kvm_mips_interrupt irq;

                        r = -EFAULT;
                        if (copy_from_user(&irq, argp, sizeof(irq)))
                                goto out;

                        kvm_debug("[%d] %s: irq: %d\n", vcpu->vcpu_id, __func__,
                                  irq.irq);

                        r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
                        break;
                }
        default:
                r = -ENOIOCTLCMD;
        }

out:
        return r;
}

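/*
 * As kvm_vcpu_ioctl_interrupt() above implements it, KVM_INTERRUPT takes
 * IRQ numbers 2-4 to raise an I/O interrupt and their negatives to clear
 * one again, with cpu == -1 addressing the target vcpu itself. A minimal
 * sketch (illustrative only):
 *
 *      struct kvm_mips_interrupt irq = { .cpu = -1, .irq = 2 };
 *
 *      ioctl(vcpu_fd, KVM_INTERRUPT, &irq);    // assert IRQ 2
 *      irq.irq = -2;
 *      ioctl(vcpu_fd, KVM_INTERRUPT, &irq);    // deassert IRQ 2
 */
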
/* Get (and clear) the dirty memory log for a memory slot. */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
{
        struct kvm_memory_slot *memslot;
        unsigned long ga, ga_end;
        int is_dirty = 0;
        int r;
        unsigned long n;

        mutex_lock(&kvm->slots_lock);

        r = kvm_get_dirty_log(kvm, log, &is_dirty);
        if (r)
                goto out;

        /* If nothing is dirty, don't bother messing with page tables. */
        if (is_dirty) {
                memslot = &kvm->memslots->memslots[log->slot];

                ga = memslot->base_gfn << PAGE_SHIFT;
                ga_end = ga + (memslot->npages << PAGE_SHIFT);

                kvm_info("%s: dirty, ga: %#lx, ga_end %#lx\n", __func__, ga,
                         ga_end);

                n = kvm_dirty_bitmap_bytes(memslot);
                memset(memslot->dirty_bitmap, 0, n);
        }

        r = 0;
out:
        mutex_unlock(&kvm->slots_lock);
        return r;
}

long kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl,
                       unsigned long arg)
{
        long r;

        switch (ioctl) {
        default:
                r = -ENOIOCTLCMD;
        }

        return r;
}

int kvm_arch_init(void *opaque)
{
        if (kvm_mips_callbacks) {
                kvm_err("kvm: module already exists\n");
                return -EEXIST;
        }

        return kvm_mips_emulation_init(&kvm_mips_callbacks);
}

void kvm_arch_exit(void)
{
        kvm_mips_callbacks = NULL;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
        return -ENOIOCTLCMD;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
        return -ENOIOCTLCMD;
}

int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
        return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
        return -ENOIOCTLCMD;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
        return -ENOIOCTLCMD;
}

int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
        return VM_FAULT_SIGBUS;
}

int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
        int r;

        switch (ext) {
        case KVM_CAP_ONE_REG:
                r = 1;
                break;
        case KVM_CAP_COALESCED_MMIO:
                r = KVM_COALESCED_MMIO_PAGE_OFFSET;
                break;
        default:
                r = 0;
                break;
        }
        return r;
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
        return kvm_mips_pending_timer(vcpu);
}

int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu)
{
        int i;
        struct mips_coproc *cop0;

        if (!vcpu)
                return -1;

        kvm_debug("VCPU Register Dump:\n");
        kvm_debug("\tpc = 0x%08lx\n", vcpu->arch.pc);
        kvm_debug("\texceptions: %08lx\n", vcpu->arch.pending_exceptions);

        for (i = 0; i < 32; i += 4) {
                kvm_debug("\tgpr%02d: %08lx %08lx %08lx %08lx\n", i,
                          vcpu->arch.gprs[i],
                          vcpu->arch.gprs[i + 1],
                          vcpu->arch.gprs[i + 2], vcpu->arch.gprs[i + 3]);
        }
        kvm_debug("\thi: 0x%08lx\n", vcpu->arch.hi);
        kvm_debug("\tlo: 0x%08lx\n", vcpu->arch.lo);

        cop0 = vcpu->arch.cop0;
        kvm_debug("\tStatus: 0x%08lx, Cause: 0x%08lx\n",
                  kvm_read_c0_guest_status(cop0),
                  kvm_read_c0_guest_cause(cop0));

        kvm_debug("\tEPC: 0x%08lx\n", kvm_read_c0_guest_epc(cop0));

        return 0;
}

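/*
 * kvm_arch_vcpu_ioctl_set_regs()/..._get_regs() below back the generic
 * KVM_SET_REGS/KVM_GET_REGS ioctls; on MIPS, struct kvm_regs carries the
 * 32 GPRs plus hi, lo and pc. A minimal read sketch (illustrative only):
 *
 *      struct kvm_regs regs;
 *
 *      ioctl(vcpu_fd, KVM_GET_REGS, &regs);
 *      printf("guest pc = 0x%llx\n", (unsigned long long)regs.pc);
 */
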
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
        int i;

        for (i = 1; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
                vcpu->arch.gprs[i] = regs->gpr[i];
        vcpu->arch.gprs[0] = 0; /* zero is special, and cannot be set. */
        vcpu->arch.hi = regs->hi;
        vcpu->arch.lo = regs->lo;
        vcpu->arch.pc = regs->pc;

        return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
                regs->gpr[i] = vcpu->arch.gprs[i];

        regs->hi = vcpu->arch.hi;
        regs->lo = vcpu->arch.lo;
        regs->pc = vcpu->arch.pc;

        return 0;
}

static void kvm_mips_comparecount_func(unsigned long data)
{
        struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;

        kvm_mips_callbacks->queue_timer_int(vcpu);

        vcpu->arch.wait = 0;
        if (waitqueue_active(&vcpu->wq))
                wake_up_interruptible(&vcpu->wq);
}

/* low level hrtimer wake routine */
static enum hrtimer_restart kvm_mips_comparecount_wakeup(struct hrtimer *timer)
{
        struct kvm_vcpu *vcpu;

        vcpu = container_of(timer, struct kvm_vcpu, arch.comparecount_timer);
        kvm_mips_comparecount_func((unsigned long) vcpu);
        return kvm_mips_count_timeout(vcpu);
}

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
        kvm_mips_callbacks->vcpu_init(vcpu);
        hrtimer_init(&vcpu->arch.comparecount_timer, CLOCK_MONOTONIC,
                     HRTIMER_MODE_REL);
        vcpu->arch.comparecount_timer.function = kvm_mips_comparecount_wakeup;
        return 0;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
                                  struct kvm_translation *tr)
{
        return 0;
}

/* Initial guest state */
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
        return kvm_mips_callbacks->vcpu_setup(vcpu);
}

static void kvm_mips_set_c0_status(void)
{
        uint32_t status = read_c0_status();

        if (cpu_has_fpu)
                status |= (ST0_CU1);

        if (cpu_has_dsp)
                status |= (ST0_MX);

        write_c0_status(status);
        ehb();
}

/*
 * Return value is in the form (errcode<<2 | RESUME_FLAG_HOST | RESUME_FLAG_NV),
 * e.g. a pending signal exits to the host as (-EINTR << 2) | RESUME_FLAG_HOST.
 */
int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
        uint32_t cause = vcpu->arch.host_cp0_cause;
        uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
        uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
        unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
        enum emulation_result er = EMULATE_DONE;
        int ret = RESUME_GUEST;

        /* Set a default exit reason */
        run->exit_reason = KVM_EXIT_UNKNOWN;
        run->ready_for_interrupt_injection = 1;

        /*
         * Set the appropriate status bits based on host CPU features,
         * before we hit the scheduler
         */
        kvm_mips_set_c0_status();

        local_irq_enable();

        kvm_debug("kvm_mips_handle_exit: cause: %#x, PC: %p, kvm_run: %p, kvm_vcpu: %p\n",
                  cause, opc, run, vcpu);

        /*
         * Do a privilege check, if in UM most of these exit conditions end up
         * causing an exception to be delivered to the Guest Kernel
         */
        er = kvm_mips_check_privilege(cause, opc, run, vcpu);
        if (er == EMULATE_PRIV_FAIL) {
                goto skip_emul;
        } else if (er == EMULATE_FAIL) {
                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                ret = RESUME_HOST;
                goto skip_emul;
        }

        switch (exccode) {
        case T_INT:
                kvm_debug("[%d]T_INT @ %p\n", vcpu->vcpu_id, opc);

                ++vcpu->stat.int_exits;
                trace_kvm_exit(vcpu, INT_EXITS);

                if (need_resched())
                        cond_resched();

                ret = RESUME_GUEST;
                break;

        case T_COP_UNUSABLE:
                kvm_debug("T_COP_UNUSABLE: @ PC: %p\n", opc);

                ++vcpu->stat.cop_unusable_exits;
                trace_kvm_exit(vcpu, COP_UNUSABLE_EXITS);
                ret = kvm_mips_callbacks->handle_cop_unusable(vcpu);
                /* XXXKYMA: Might need to return to user space */
                if (run->exit_reason == KVM_EXIT_IRQ_WINDOW_OPEN)
                        ret = RESUME_HOST;
                break;

        case T_TLB_MOD:
                ++vcpu->stat.tlbmod_exits;
                trace_kvm_exit(vcpu, TLBMOD_EXITS);
                ret = kvm_mips_callbacks->handle_tlb_mod(vcpu);
                break;

        case T_TLB_ST_MISS:
                kvm_debug("TLB ST fault: cause %#x, status %#lx, PC: %p, BadVaddr: %#lx\n",
                          cause, kvm_read_c0_guest_status(vcpu->arch.cop0), opc,
                          badvaddr);

                ++vcpu->stat.tlbmiss_st_exits;
                trace_kvm_exit(vcpu, TLBMISS_ST_EXITS);
                ret = kvm_mips_callbacks->handle_tlb_st_miss(vcpu);
                break;

        case T_TLB_LD_MISS:
                kvm_debug("TLB LD fault: cause %#x, PC: %p, BadVaddr: %#lx\n",
                          cause, opc, badvaddr);

                ++vcpu->stat.tlbmiss_ld_exits;
                trace_kvm_exit(vcpu, TLBMISS_LD_EXITS);
                ret = kvm_mips_callbacks->handle_tlb_ld_miss(vcpu);
                break;

        case T_ADDR_ERR_ST:
                ++vcpu->stat.addrerr_st_exits;
                trace_kvm_exit(vcpu, ADDRERR_ST_EXITS);
                ret = kvm_mips_callbacks->handle_addr_err_st(vcpu);
                break;

        case T_ADDR_ERR_LD:
                ++vcpu->stat.addrerr_ld_exits;
                trace_kvm_exit(vcpu, ADDRERR_LD_EXITS);
                ret = kvm_mips_callbacks->handle_addr_err_ld(vcpu);
                break;

        case T_SYSCALL:
                ++vcpu->stat.syscall_exits;
                trace_kvm_exit(vcpu, SYSCALL_EXITS);
                ret = kvm_mips_callbacks->handle_syscall(vcpu);
                break;

        case T_RES_INST:
                ++vcpu->stat.resvd_inst_exits;
                trace_kvm_exit(vcpu, RESVD_INST_EXITS);
                ret = kvm_mips_callbacks->handle_res_inst(vcpu);
                break;

        case T_BREAK:
                ++vcpu->stat.break_inst_exits;
                trace_kvm_exit(vcpu, BREAK_INST_EXITS);
                ret = kvm_mips_callbacks->handle_break(vcpu);
                break;

        default:
                kvm_err("Exception Code: %d, not yet handled, @ PC: %p, inst: 0x%08x BadVaddr: %#lx Status: %#lx\n",
                        exccode, opc, kvm_get_inst(opc, vcpu), badvaddr,
                        kvm_read_c0_guest_status(vcpu->arch.cop0));
                kvm_arch_vcpu_dump_regs(vcpu);
                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                ret = RESUME_HOST;
                break;

        }

skip_emul:
        local_irq_disable();

        if (er == EMULATE_DONE && !(ret & RESUME_HOST))
                kvm_mips_deliver_interrupts(vcpu, cause);

        if (!(ret & RESUME_HOST)) {
                /* Only check for signals if not already exiting to userspace */
                if (signal_pending(current)) {
                        run->exit_reason = KVM_EXIT_INTR;
                        ret = (-EINTR << 2) | RESUME_HOST;
                        ++vcpu->stat.signal_exits;
                        trace_kvm_exit(vcpu, SIGNAL_EXITS);
                }
        }

        return ret;
}

int __init kvm_mips_init(void)
{
        int ret;

        ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);

        if (ret)
                return ret;

        /*
         * On MIPS, kernel modules are executed from "mapped space", which
         * requires TLBs. The TLB handling code is statically linked with
         * the rest of the kernel (tlb.c) to avoid the possibility of
         * double faulting. The issue is that the TLB code references
         * routines that are part of the KVM module, which are only
         * available once the module is loaded.
         */
        kvm_mips_gfn_to_pfn = gfn_to_pfn;
        kvm_mips_release_pfn_clean = kvm_release_pfn_clean;
        kvm_mips_is_error_pfn = is_error_pfn;

        pr_info("KVM/MIPS Initialized\n");
        return 0;
}

void __exit kvm_mips_exit(void)
{
        kvm_exit();

        kvm_mips_gfn_to_pfn = NULL;
        kvm_mips_release_pfn_clean = NULL;
        kvm_mips_is_error_pfn = NULL;

        pr_info("KVM/MIPS unloaded\n");
}

module_init(kvm_mips_init);
module_exit(kvm_mips_exit);

EXPORT_TRACEPOINT_SYMBOL(kvm_exit);