Lines Matching +full:reserved +full:-ipi +full:-vectors
(Full-word search hits, all from arch/mips/kvm/mips.c; each entry shows the source line number, the matched text, and its enclosing function.)
8 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
115 return !!(vcpu->arch.pending_exceptions); in kvm_arch_vcpu_runnable()
130 return kvm_mips_callbacks->hardware_enable(); in kvm_arch_hardware_enable()
135 kvm_mips_callbacks->hardware_disable(); in kvm_arch_hardware_disable()
149 return -EINVAL; in kvm_arch_init_vm()
152 /* Allocate page table to map GPA -> RPA */ in kvm_arch_init_vm()
153 kvm->arch.gpa_mm.pgd = kvm_pgd_alloc(); in kvm_arch_init_vm()
154 if (!kvm->arch.gpa_mm.pgd) in kvm_arch_init_vm()
155 return -ENOMEM; in kvm_arch_init_vm()
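kvm_arch_init_vm() runs when userspace creates a VM; a minimal sketch of that entry point (error handling trimmed; an unsupported VM type is what produces the -EINVAL at line 149, and type 0 is accepted by current MIPS KVM):

    #include <fcntl.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    int kvm_fd = open("/dev/kvm", O_RDWR | O_CLOEXEC);
    int vm_fd  = -1;
    /* sanity-check the ioctl ABI before creating anything */
    if (ioctl(kvm_fd, KVM_GET_API_VERSION, 0) == KVM_API_VERSION)
        vm_fd = ioctl(kvm_fd, KVM_CREATE_VM, 0);   /* type 0 */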
168 pgd_free(NULL, kvm->arch.gpa_mm.pgd); in kvm_mips_free_gpa_pt()
180 return -ENOIOCTLCMD; in kvm_arch_dev_ioctl()
198 spin_lock(&kvm->mmu_lock); in kvm_arch_flush_shadow_memslot()
200 kvm_mips_flush_gpa_pt(kvm, slot->base_gfn, in kvm_arch_flush_shadow_memslot()
201 slot->base_gfn + slot->npages - 1); in kvm_arch_flush_shadow_memslot()
203 spin_unlock(&kvm->mmu_lock); in kvm_arch_flush_shadow_memslot()
231 (!(old->flags & KVM_MEM_LOG_DIRTY_PAGES) && in kvm_arch_commit_memory_region()
232 new->flags & KVM_MEM_LOG_DIRTY_PAGES)) { in kvm_arch_commit_memory_region()
233 spin_lock(&kvm->mmu_lock); in kvm_arch_commit_memory_region()
235 needs_flush = kvm_mips_mkclean_gpa_pt(kvm, new->base_gfn, in kvm_arch_commit_memory_region()
236 new->base_gfn + new->npages - 1); in kvm_arch_commit_memory_region()
239 spin_unlock(&kvm->mmu_lock); in kvm_arch_commit_memory_region()
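Userspace side of this path, as a sketch (vm_fd, mem, mem_size and bitmap are assumptions): turning on KVM_MEM_LOG_DIRTY_PAGES for a slot is what triggers the kvm_mips_mkclean_gpa_pt() write-protect pass above, and KVM_GET_DIRTY_LOG then harvests the per-page bits.

    struct kvm_userspace_memory_region region = {
        .slot            = 0,
        .flags           = KVM_MEM_LOG_DIRTY_PAGES,
        .guest_phys_addr = 0,
        .memory_size     = mem_size,
        .userspace_addr  = (unsigned long)mem,
    };
    ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region);

    /* later: fetch the per-page dirty bitmap for the slot */
    struct kvm_dirty_log log = { .slot = 0, .dirty_bitmap = bitmap };
    ioctl(vm_fd, KVM_GET_DIRTY_LOG, &log);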
267 kvm_mips_callbacks->queue_timer_int(vcpu); in kvm_mips_comparecount_wakeup()
269 vcpu->arch.wait = 0; in kvm_mips_comparecount_wakeup()
270 rcuwait_wake_up(&vcpu->wait); in kvm_mips_comparecount_wakeup()
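The fragments above, reassembled as a sketch (the real callback defers the restart decision to the timer-emulation code, so NORESTART below is a simplification):

    static enum hrtimer_restart comparecount_wakeup(struct hrtimer *timer)
    {
        struct kvm_vcpu *vcpu =
            container_of(timer, struct kvm_vcpu, arch.comparecount_timer);

        kvm_mips_callbacks->queue_timer_int(vcpu); /* raise the guest timer irq */
        vcpu->arch.wait = 0;                       /* make a halted vCPU re-check */
        rcuwait_wake_up(&vcpu->wait);              /* wake it if blocked */
        return HRTIMER_NORESTART;                  /* real code may rearm instead */
    }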
287 vcpu->kvm, vcpu->vcpu_id, vcpu); in kvm_arch_vcpu_create()
289 err = kvm_mips_callbacks->vcpu_init(vcpu); in kvm_arch_vcpu_create()
293 hrtimer_init(&vcpu->arch.comparecount_timer, CLOCK_MONOTONIC, in kvm_arch_vcpu_create()
295 vcpu->arch.comparecount_timer.function = kvm_mips_comparecount_wakeup; in kvm_arch_vcpu_create()
309 err = -ENOMEM; in kvm_arch_vcpu_create()
323 err = -ENOMEM; in kvm_arch_vcpu_create()
328 vcpu->arch.guest_ebase = gebase; in kvm_arch_vcpu_create()
330 /* Build guest exception vectors dynamically in unmapped memory */ in kvm_arch_vcpu_create()
333 /* TLB refill (or XTLB refill on 64-bit VZ where KX=1) */ in kvm_arch_vcpu_create()
342 /* For vectored interrupts poke the exception code @ all offsets 0-7 */ in kvm_arch_vcpu_create()
355 vcpu->arch.vcpu_run = p; in kvm_arch_vcpu_create()
362 dump_handler("kvm_vcpu_run", vcpu->arch.vcpu_run, p); in kvm_arch_vcpu_create()
365 dump_handler("kvm_exit", gebase + 0x2000, vcpu->arch.vcpu_run); in kvm_arch_vcpu_create()
372 vcpu->arch.last_sched_cpu = -1; in kvm_arch_vcpu_create()
373 vcpu->arch.last_exec_cpu = -1; in kvm_arch_vcpu_create()
376 err = kvm_mips_callbacks->vcpu_setup(vcpu); in kvm_arch_vcpu_create()
385 kvm_mips_callbacks->vcpu_uninit(vcpu); in kvm_arch_vcpu_create()
391 hrtimer_cancel(&vcpu->arch.comparecount_timer); in kvm_arch_vcpu_destroy()
396 kfree(vcpu->arch.guest_ebase); in kvm_arch_vcpu_destroy()
398 kvm_mips_callbacks->vcpu_uninit(vcpu); in kvm_arch_vcpu_destroy()
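Userspace sketch of what drives kvm_arch_vcpu_create() above (kvm_fd and vm_fd come from the first sketch; add <sys/mman.h>): each KVM_CREATE_VCPU builds the vCPU's private exception vectors in gebase, and the mmap'd kvm_run area is how exits are reported.

    int vcpu_fd = ioctl(vm_fd, KVM_CREATE_VCPU, 0);       /* vcpu id 0 */
    long sz = ioctl(kvm_fd, KVM_GET_VCPU_MMAP_SIZE, 0);   /* asked of /dev/kvm */
    struct kvm_run *run = mmap(NULL, sz, PROT_READ | PROT_WRITE,
                               MAP_SHARED, vcpu_fd, 0);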
404 return -ENOIOCTLCMD; in kvm_arch_vcpu_ioctl_set_guest_debug()
419 ret = kvm_mips_callbacks->vcpu_run(vcpu); in kvm_mips_vcpu_enter_exit()
427 int r = -EINTR; in kvm_arch_vcpu_ioctl_run()
433 if (vcpu->mmio_needed) { in kvm_arch_vcpu_ioctl_run()
434 if (!vcpu->mmio_is_write) in kvm_arch_vcpu_ioctl_run()
436 vcpu->mmio_needed = 0; in kvm_arch_vcpu_ioctl_run()
439 if (vcpu->run->immediate_exit) in kvm_arch_vcpu_ioctl_run()
450 * reordered ahead of the write to vcpu->mode, or we could miss a TLB in kvm_arch_vcpu_ioctl_run()
452 * mode and not needing an IPI. in kvm_arch_vcpu_ioctl_run()
454 smp_store_mb(vcpu->mode, IN_GUEST_MODE); in kvm_arch_vcpu_ioctl_run()
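The smp_store_mb() publishes IN_GUEST_MODE before VCPU requests are rechecked, so a remote CPU queueing a TLB flush either sees guest mode and sends an IPI, or the entry path sees the pending request (the same pairing appears again at line 1336 below). A sketch of the userspace run loop this serves, continuing the earlier sketches (backing is an assumption; add <errno.h> and <string.h>): on an MMIO read the guest exits, userspace fills run->mmio.data, and the vcpu->mmio_needed code above completes the load on the next KVM_RUN; run->immediate_exit and pending signals both surface as -EINTR.

    for (;;) {
        if (ioctl(vcpu_fd, KVM_RUN, 0) < 0) {
            if (errno == EINTR)
                continue;                /* signal or run->immediate_exit */
            break;
        }
        if (run->exit_reason == KVM_EXIT_MMIO && !run->mmio.is_write)
            memcpy(run->mmio.data,
                   backing + run->mmio.phys_addr, run->mmio.len);
    }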
485 int intr = (int)irq->irq; in kvm_vcpu_ioctl_interrupt()
490 intr == (-kvm_priority_to_irq[MIPS_EXC_INT_IPI_1]) || in kvm_vcpu_ioctl_interrupt()
491 intr == (-kvm_priority_to_irq[MIPS_EXC_INT_IPI_2])) in kvm_vcpu_ioctl_interrupt()
492 kvm_debug("%s: CPU: %d, INTR: %d\n", __func__, irq->cpu, in kvm_vcpu_ioctl_interrupt()
495 if (irq->cpu == -1) in kvm_vcpu_ioctl_interrupt()
498 dvcpu = kvm_get_vcpu(vcpu->kvm, irq->cpu); in kvm_vcpu_ioctl_interrupt()
501 kvm_mips_callbacks->queue_io_int(dvcpu, irq); in kvm_vcpu_ioctl_interrupt()
503 } else if (intr == -2 || intr == -3 || intr == -4 || intr == -6) { in kvm_vcpu_ioctl_interrupt()
504 kvm_mips_callbacks->dequeue_io_int(dvcpu, irq); in kvm_vcpu_ioctl_interrupt()
507 irq->cpu, irq->irq); in kvm_vcpu_ioctl_interrupt()
508 return -EINVAL; in kvm_vcpu_ioctl_interrupt()
511 dvcpu->arch.wait = 0; in kvm_vcpu_ioctl_interrupt()
513 rcuwait_wake_up(&dvcpu->wait); in kvm_vcpu_ioctl_interrupt()
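Userspace sketch of this ioctl: per the checks above, a positive irq number queues the interrupt, its negative dequeues it, and cpu == -1 targets the vCPU the ioctl is issued on.

    struct kvm_mips_interrupt irq = { .cpu = -1, .irq = 3 };
    ioctl(vcpu_fd, KVM_INTERRUPT, &irq);    /* assert line 3 */
    irq.irq = -3;
    ioctl(vcpu_fd, KVM_INTERRUPT, &irq);    /* deassert it again */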
521 return -ENOIOCTLCMD; in kvm_arch_vcpu_ioctl_get_mpstate()
527 return -ENOIOCTLCMD; in kvm_arch_vcpu_ioctl_set_mpstate()
586 if (kvm_mips_guest_can_have_fpu(&vcpu->arch)) { in kvm_mips_num_regs()
592 if (kvm_mips_guest_can_have_msa(&vcpu->arch)) in kvm_mips_num_regs()
594 ret += kvm_mips_callbacks->num_regs(vcpu); in kvm_mips_num_regs()
606 return -EFAULT; in kvm_mips_copy_reg_indices()
609 if (kvm_mips_guest_can_have_fpu(&vcpu->arch)) { in kvm_mips_copy_reg_indices()
612 return -EFAULT; in kvm_mips_copy_reg_indices()
618 return -EFAULT; in kvm_mips_copy_reg_indices()
627 return -EFAULT; in kvm_mips_copy_reg_indices()
632 if (kvm_mips_guest_can_have_msa(&vcpu->arch)) { in kvm_mips_copy_reg_indices()
635 return -EFAULT; in kvm_mips_copy_reg_indices()
641 return -EFAULT; in kvm_mips_copy_reg_indices()
646 return kvm_mips_callbacks->copy_reg_indices(vcpu, indices); in kvm_mips_copy_reg_indices()
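Userspace sketch of the two-call protocol implied by the -E2BIG path in kvm_arch_vcpu_ioctl() below: probe with n = 0, the kernel writes back the real count, then allocate and call again (add <stdlib.h>).

    struct kvm_reg_list probe = { .n = 0 }, *list;

    ioctl(vcpu_fd, KVM_GET_REG_LIST, &probe);   /* fails with E2BIG, sets probe.n */
    list = malloc(sizeof(*list) + probe.n * sizeof(__u64));
    list->n = probe.n;
    ioctl(vcpu_fd, KVM_GET_REG_LIST, list);     /* fills list->reg[] */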
652 struct mips_coproc *cop0 = &vcpu->arch.cop0; in kvm_mips_get_reg()
653 struct mips_fpu_struct *fpu = &vcpu->arch.fpu; in kvm_mips_get_reg()
659 switch (reg->id) { in kvm_mips_get_reg()
662 v = (long)vcpu->arch.gprs[reg->id - KVM_REG_MIPS_R0]; in kvm_mips_get_reg()
666 v = (long)vcpu->arch.hi; in kvm_mips_get_reg()
669 v = (long)vcpu->arch.lo; in kvm_mips_get_reg()
673 v = (long)vcpu->arch.pc; in kvm_mips_get_reg()
678 if (!kvm_mips_guest_has_fpu(&vcpu->arch)) in kvm_mips_get_reg()
679 return -EINVAL; in kvm_mips_get_reg()
680 idx = reg->id - KVM_REG_MIPS_FPR_32(0); in kvm_mips_get_reg()
683 v = get_fpr32(&fpu->fpr[idx], 0); in kvm_mips_get_reg()
685 v = get_fpr32(&fpu->fpr[idx & ~1], idx & 1); in kvm_mips_get_reg()
688 if (!kvm_mips_guest_has_fpu(&vcpu->arch)) in kvm_mips_get_reg()
689 return -EINVAL; in kvm_mips_get_reg()
690 idx = reg->id - KVM_REG_MIPS_FPR_64(0); in kvm_mips_get_reg()
693 return -EINVAL; in kvm_mips_get_reg()
694 v = get_fpr64(&fpu->fpr[idx], 0); in kvm_mips_get_reg()
697 if (!kvm_mips_guest_has_fpu(&vcpu->arch)) in kvm_mips_get_reg()
698 return -EINVAL; in kvm_mips_get_reg()
702 if (!kvm_mips_guest_has_fpu(&vcpu->arch)) in kvm_mips_get_reg()
703 return -EINVAL; in kvm_mips_get_reg()
704 v = fpu->fcr31; in kvm_mips_get_reg()
709 if (!kvm_mips_guest_has_msa(&vcpu->arch)) in kvm_mips_get_reg()
710 return -EINVAL; in kvm_mips_get_reg()
713 return -EINVAL; in kvm_mips_get_reg()
714 idx = reg->id - KVM_REG_MIPS_VEC_128(0); in kvm_mips_get_reg()
717 vs[0] = get_fpr64(&fpu->fpr[idx], 0); in kvm_mips_get_reg()
718 vs[1] = get_fpr64(&fpu->fpr[idx], 1); in kvm_mips_get_reg()
721 vs[0] = get_fpr64(&fpu->fpr[idx], 1); in kvm_mips_get_reg()
722 vs[1] = get_fpr64(&fpu->fpr[idx], 0); in kvm_mips_get_reg()
726 if (!kvm_mips_guest_has_msa(&vcpu->arch)) in kvm_mips_get_reg()
727 return -EINVAL; in kvm_mips_get_reg()
731 if (!kvm_mips_guest_has_msa(&vcpu->arch)) in kvm_mips_get_reg()
732 return -EINVAL; in kvm_mips_get_reg()
733 v = fpu->msacsr; in kvm_mips_get_reg()
738 ret = kvm_mips_callbacks->get_one_reg(vcpu, reg, &v); in kvm_mips_get_reg()
743 if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64) { in kvm_mips_get_reg()
744 u64 __user *uaddr64 = (u64 __user *)(long)reg->addr; in kvm_mips_get_reg()
747 } else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U32) { in kvm_mips_get_reg()
748 u32 __user *uaddr32 = (u32 __user *)(long)reg->addr; in kvm_mips_get_reg()
752 } else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U128) { in kvm_mips_get_reg()
753 void __user *uaddr = (void __user *)(long)reg->addr; in kvm_mips_get_reg()
755 return copy_to_user(uaddr, vs, 16) ? -EFAULT : 0; in kvm_mips_get_reg()
757 return -EINVAL; in kvm_mips_get_reg()
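From userspace, each branch above is reached through KVM_GET_ONE_REG; the id encodes both the register and its access size (U32/U64/U128, as checked at the end). A sketch reading the guest PC:

    __u64 pc;
    struct kvm_one_reg reg = {
        .id   = KVM_REG_MIPS_PC,
        .addr = (__u64)(unsigned long)&pc,
    };
    ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);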
764 struct mips_coproc *cop0 = &vcpu->arch.cop0; in kvm_mips_set_reg()
765 struct mips_fpu_struct *fpu = &vcpu->arch.fpu; in kvm_mips_set_reg()
770 if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64) { in kvm_mips_set_reg()
771 u64 __user *uaddr64 = (u64 __user *)(long)reg->addr; in kvm_mips_set_reg()
774 return -EFAULT; in kvm_mips_set_reg()
775 } else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U32) { in kvm_mips_set_reg()
776 u32 __user *uaddr32 = (u32 __user *)(long)reg->addr; in kvm_mips_set_reg()
780 return -EFAULT; in kvm_mips_set_reg()
782 } else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U128) { in kvm_mips_set_reg()
783 void __user *uaddr = (void __user *)(long)reg->addr; in kvm_mips_set_reg()
785 return copy_from_user(vs, uaddr, 16) ? -EFAULT : 0; in kvm_mips_set_reg()
787 return -EINVAL; in kvm_mips_set_reg()
790 switch (reg->id) { in kvm_mips_set_reg()
796 vcpu->arch.gprs[reg->id - KVM_REG_MIPS_R0] = v; in kvm_mips_set_reg()
800 vcpu->arch.hi = v; in kvm_mips_set_reg()
803 vcpu->arch.lo = v; in kvm_mips_set_reg()
807 vcpu->arch.pc = v; in kvm_mips_set_reg()
812 if (!kvm_mips_guest_has_fpu(&vcpu->arch)) in kvm_mips_set_reg()
813 return -EINVAL; in kvm_mips_set_reg()
814 idx = reg->id - KVM_REG_MIPS_FPR_32(0); in kvm_mips_set_reg()
817 set_fpr32(&fpu->fpr[idx], 0, v); in kvm_mips_set_reg()
819 set_fpr32(&fpu->fpr[idx & ~1], idx & 1, v); in kvm_mips_set_reg()
822 if (!kvm_mips_guest_has_fpu(&vcpu->arch)) in kvm_mips_set_reg()
823 return -EINVAL; in kvm_mips_set_reg()
824 idx = reg->id - KVM_REG_MIPS_FPR_64(0); in kvm_mips_set_reg()
827 return -EINVAL; in kvm_mips_set_reg()
828 set_fpr64(&fpu->fpr[idx], 0, v); in kvm_mips_set_reg()
831 if (!kvm_mips_guest_has_fpu(&vcpu->arch)) in kvm_mips_set_reg()
832 return -EINVAL; in kvm_mips_set_reg()
833 /* Read-only */ in kvm_mips_set_reg()
836 if (!kvm_mips_guest_has_fpu(&vcpu->arch)) in kvm_mips_set_reg()
837 return -EINVAL; in kvm_mips_set_reg()
838 fpu->fcr31 = v; in kvm_mips_set_reg()
843 if (!kvm_mips_guest_has_msa(&vcpu->arch)) in kvm_mips_set_reg()
844 return -EINVAL; in kvm_mips_set_reg()
845 idx = reg->id - KVM_REG_MIPS_VEC_128(0); in kvm_mips_set_reg()
848 set_fpr64(&fpu->fpr[idx], 0, vs[0]); in kvm_mips_set_reg()
849 set_fpr64(&fpu->fpr[idx], 1, vs[1]); in kvm_mips_set_reg()
852 set_fpr64(&fpu->fpr[idx], 1, vs[0]); in kvm_mips_set_reg()
853 set_fpr64(&fpu->fpr[idx], 0, vs[1]); in kvm_mips_set_reg()
857 if (!kvm_mips_guest_has_msa(&vcpu->arch)) in kvm_mips_set_reg()
858 return -EINVAL; in kvm_mips_set_reg()
859 /* Read-only */ in kvm_mips_set_reg()
862 if (!kvm_mips_guest_has_msa(&vcpu->arch)) in kvm_mips_set_reg()
863 return -EINVAL; in kvm_mips_set_reg()
864 fpu->msacsr = v; in kvm_mips_set_reg()
869 return kvm_mips_callbacks->set_one_reg(vcpu, reg, v); in kvm_mips_set_reg()
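And the matching write path from userspace, as a sketch (writes to $0 and to the registers marked /* Read-only */ above are silently ignored):

    __u64 v = 0x80001000;                     /* hypothetical value */
    struct kvm_one_reg reg = {
        .id   = KVM_REG_MIPS_R0 + 4,          /* GPR $a0; R0..R31 ids are consecutive */
        .addr = (__u64)(unsigned long)&v,
    };
    ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);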
879 if (!kvm_vm_ioctl_check_extension(vcpu->kvm, cap->cap)) in kvm_vcpu_ioctl_enable_cap()
880 return -EINVAL; in kvm_vcpu_ioctl_enable_cap()
881 if (cap->flags) in kvm_vcpu_ioctl_enable_cap()
882 return -EINVAL; in kvm_vcpu_ioctl_enable_cap()
883 if (cap->args[0]) in kvm_vcpu_ioctl_enable_cap()
884 return -EINVAL; in kvm_vcpu_ioctl_enable_cap()
886 switch (cap->cap) { in kvm_vcpu_ioctl_enable_cap()
888 vcpu->arch.fpu_enabled = true; in kvm_vcpu_ioctl_enable_cap()
891 vcpu->arch.msa_enabled = true; in kvm_vcpu_ioctl_enable_cap()
894 r = -EINVAL; in kvm_vcpu_ioctl_enable_cap()
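Userspace sketch: the checks above require flags and args[0] to be zero, so enabling the guest FPU or MSA is just the cap number.

    struct kvm_enable_cap cap = { .cap = KVM_CAP_MIPS_FPU };
    ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap);     /* sets arch.fpu_enabled */
    cap.cap = KVM_CAP_MIPS_MSA;
    ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap);     /* sets arch.msa_enabled */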
904 struct kvm_vcpu *vcpu = filp->private_data; in kvm_arch_vcpu_async_ioctl()
911 return -EFAULT; in kvm_arch_vcpu_async_ioctl()
912 kvm_debug("[%d] %s: irq: %d\n", vcpu->vcpu_id, __func__, in kvm_arch_vcpu_async_ioctl()
918 return -ENOIOCTLCMD; in kvm_arch_vcpu_async_ioctl()
924 struct kvm_vcpu *vcpu = filp->private_data; in kvm_arch_vcpu_ioctl()
935 r = -EFAULT; in kvm_arch_vcpu_ioctl()
949 r = -EFAULT; in kvm_arch_vcpu_ioctl()
956 r = -E2BIG; in kvm_arch_vcpu_ioctl()
959 r = kvm_mips_copy_reg_indices(vcpu, user_list->reg); in kvm_arch_vcpu_ioctl()
965 r = -EFAULT; in kvm_arch_vcpu_ioctl()
972 r = -ENOIOCTLCMD; in kvm_arch_vcpu_ioctl()
986 kvm_mips_callbacks->prepare_flush_shadow(kvm); in kvm_arch_flush_remote_tlbs()
996 r = -ENOIOCTLCMD; in kvm_arch_vm_ioctl()
1005 return -ENOIOCTLCMD; in kvm_arch_vcpu_ioctl_get_sregs()
1011 return -ENOIOCTLCMD; in kvm_arch_vcpu_ioctl_set_sregs()
1020 return -ENOIOCTLCMD; in kvm_arch_vcpu_ioctl_get_fpu()
1025 return -ENOIOCTLCMD; in kvm_arch_vcpu_ioctl_set_fpu()
1074 r = kvm_mips_callbacks->check_extension(kvm, ext); in kvm_vm_ioctl_check_extension()
1083 kvm_read_c0_guest_cause(&vcpu->arch.cop0) & C_TI; in kvm_cpu_has_pending_timer()
1092 return -1; in kvm_arch_vcpu_dump_regs()
1095 kvm_debug("\tpc = 0x%08lx\n", vcpu->arch.pc); in kvm_arch_vcpu_dump_regs()
1096 kvm_debug("\texceptions: %08lx\n", vcpu->arch.pending_exceptions); in kvm_arch_vcpu_dump_regs()
1100 vcpu->arch.gprs[i], in kvm_arch_vcpu_dump_regs()
1101 vcpu->arch.gprs[i + 1], in kvm_arch_vcpu_dump_regs()
1102 vcpu->arch.gprs[i + 2], vcpu->arch.gprs[i + 3]); in kvm_arch_vcpu_dump_regs()
1104 kvm_debug("\thi: 0x%08lx\n", vcpu->arch.hi); in kvm_arch_vcpu_dump_regs()
1105 kvm_debug("\tlo: 0x%08lx\n", vcpu->arch.lo); in kvm_arch_vcpu_dump_regs()
1107 cop0 = &vcpu->arch.cop0; in kvm_arch_vcpu_dump_regs()
1123 for (i = 1; i < ARRAY_SIZE(vcpu->arch.gprs); i++) in kvm_arch_vcpu_ioctl_set_regs()
1124 vcpu->arch.gprs[i] = regs->gpr[i]; in kvm_arch_vcpu_ioctl_set_regs()
1125 vcpu->arch.gprs[0] = 0; /* zero is special, and cannot be set. */ in kvm_arch_vcpu_ioctl_set_regs()
1126 vcpu->arch.hi = regs->hi; in kvm_arch_vcpu_ioctl_set_regs()
1127 vcpu->arch.lo = regs->lo; in kvm_arch_vcpu_ioctl_set_regs()
1128 vcpu->arch.pc = regs->pc; in kvm_arch_vcpu_ioctl_set_regs()
1140 for (i = 0; i < ARRAY_SIZE(vcpu->arch.gprs); i++) in kvm_arch_vcpu_ioctl_get_regs()
1141 regs->gpr[i] = vcpu->arch.gprs[i]; in kvm_arch_vcpu_ioctl_get_regs()
1143 regs->hi = vcpu->arch.hi; in kvm_arch_vcpu_ioctl_get_regs()
1144 regs->lo = vcpu->arch.lo; in kvm_arch_vcpu_ioctl_get_regs()
1145 regs->pc = vcpu->arch.pc; in kvm_arch_vcpu_ioctl_get_regs()
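Userspace sketch pairing with the two functions above; on MIPS, struct kvm_regs is just gpr[32] plus hi, lo and pc, all __u64.

    struct kvm_regs regs;

    ioctl(vcpu_fd, KVM_GET_REGS, &regs);
    regs.pc = 0xffffffff80100000;             /* hypothetical entry point */
    ioctl(vcpu_fd, KVM_SET_REGS, &regs);      /* gpr[0] is forced back to 0 above */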
1173 struct kvm_run *run = vcpu->run; in __kvm_mips_handle_exit()
1174 u32 cause = vcpu->arch.host_cp0_cause; in __kvm_mips_handle_exit()
1176 u32 __user *opc = (u32 __user *) vcpu->arch.pc; in __kvm_mips_handle_exit()
1177 unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr; in __kvm_mips_handle_exit()
1182 vcpu->mode = OUTSIDE_GUEST_MODE; in __kvm_mips_handle_exit()
1185 run->exit_reason = KVM_EXIT_UNKNOWN; in __kvm_mips_handle_exit()
1186 run->ready_for_interrupt_injection = 1; in __kvm_mips_handle_exit()
1202 kvm_debug("[%d]EXCCODE_INT @ %p\n", vcpu->vcpu_id, opc); in __kvm_mips_handle_exit()
1204 ++vcpu->stat.int_exits; in __kvm_mips_handle_exit()
1215 ++vcpu->stat.cop_unusable_exits; in __kvm_mips_handle_exit()
1216 ret = kvm_mips_callbacks->handle_cop_unusable(vcpu); in __kvm_mips_handle_exit()
1218 if (run->exit_reason == KVM_EXIT_IRQ_WINDOW_OPEN) in __kvm_mips_handle_exit()
1223 ++vcpu->stat.tlbmod_exits; in __kvm_mips_handle_exit()
1224 ret = kvm_mips_callbacks->handle_tlb_mod(vcpu); in __kvm_mips_handle_exit()
1229 cause, kvm_read_c0_guest_status(&vcpu->arch.cop0), opc, in __kvm_mips_handle_exit()
1232 ++vcpu->stat.tlbmiss_st_exits; in __kvm_mips_handle_exit()
1233 ret = kvm_mips_callbacks->handle_tlb_st_miss(vcpu); in __kvm_mips_handle_exit()
1240 ++vcpu->stat.tlbmiss_ld_exits; in __kvm_mips_handle_exit()
1241 ret = kvm_mips_callbacks->handle_tlb_ld_miss(vcpu); in __kvm_mips_handle_exit()
1245 ++vcpu->stat.addrerr_st_exits; in __kvm_mips_handle_exit()
1246 ret = kvm_mips_callbacks->handle_addr_err_st(vcpu); in __kvm_mips_handle_exit()
1250 ++vcpu->stat.addrerr_ld_exits; in __kvm_mips_handle_exit()
1251 ret = kvm_mips_callbacks->handle_addr_err_ld(vcpu); in __kvm_mips_handle_exit()
1255 ++vcpu->stat.syscall_exits; in __kvm_mips_handle_exit()
1256 ret = kvm_mips_callbacks->handle_syscall(vcpu); in __kvm_mips_handle_exit()
1260 ++vcpu->stat.resvd_inst_exits; in __kvm_mips_handle_exit()
1261 ret = kvm_mips_callbacks->handle_res_inst(vcpu); in __kvm_mips_handle_exit()
1265 ++vcpu->stat.break_inst_exits; in __kvm_mips_handle_exit()
1266 ret = kvm_mips_callbacks->handle_break(vcpu); in __kvm_mips_handle_exit()
1270 ++vcpu->stat.trap_inst_exits; in __kvm_mips_handle_exit()
1271 ret = kvm_mips_callbacks->handle_trap(vcpu); in __kvm_mips_handle_exit()
1275 ++vcpu->stat.msa_fpe_exits; in __kvm_mips_handle_exit()
1276 ret = kvm_mips_callbacks->handle_msa_fpe(vcpu); in __kvm_mips_handle_exit()
1280 ++vcpu->stat.fpe_exits; in __kvm_mips_handle_exit()
1281 ret = kvm_mips_callbacks->handle_fpe(vcpu); in __kvm_mips_handle_exit()
1285 ++vcpu->stat.msa_disabled_exits; in __kvm_mips_handle_exit()
1286 ret = kvm_mips_callbacks->handle_msa_disabled(vcpu); in __kvm_mips_handle_exit()
1291 ret = kvm_mips_callbacks->handle_guest_exit(vcpu); in __kvm_mips_handle_exit()
1301 kvm_read_c0_guest_status(&vcpu->arch.cop0)); in __kvm_mips_handle_exit()
1303 run->exit_reason = KVM_EXIT_INTERNAL_ERROR; in __kvm_mips_handle_exit()
1320 run->exit_reason = KVM_EXIT_INTR; in __kvm_mips_handle_exit()
1321 ret = (-EINTR << 2) | RESUME_HOST; in __kvm_mips_handle_exit()
1322 ++vcpu->stat.signal_exits; in __kvm_mips_handle_exit()
1332 * callback is not reordered ahead of the write to vcpu->mode, in __kvm_mips_handle_exit()
1334 * the VCPU as outside of guest mode and not needing an IPI. in __kvm_mips_handle_exit()
1336 smp_store_mb(vcpu->mode, IN_GUEST_MODE); in __kvm_mips_handle_exit()
1338 kvm_mips_callbacks->vcpu_reenter(vcpu); in __kvm_mips_handle_exit()
1349 if (kvm_mips_guest_has_fpu(&vcpu->arch) && in __kvm_mips_handle_exit()
1351 __kvm_restore_fcsr(&vcpu->arch); in __kvm_mips_handle_exit()
1353 if (kvm_mips_guest_has_msa(&vcpu->arch) && in __kvm_mips_handle_exit()
1355 __kvm_restore_msacsr(&vcpu->arch); in __kvm_mips_handle_exit()
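For orientation, the EXCCODE_* cases dispatched in the switch above are the architectural Cause.ExcCode values; a hedged summary (values per the base MIPS architecture, names as in asm/mipsregs.h):

    /*
     * EXCCODE_INT   0  interrupt              EXCCODE_SYS   8  syscall
     * EXCCODE_MOD   1  TLB modified           EXCCODE_BP    9  breakpoint
     * EXCCODE_TLBL  2  TLB miss (load/fetch)  EXCCODE_RI   10  reserved instruction
     * EXCCODE_TLBS  3  TLB miss (store)       EXCCODE_CPU  11  coprocessor unusable
     * EXCCODE_ADEL  4  address error (load)   EXCCODE_TR   13  trap
     * EXCCODE_ADES  5  address error (store)  EXCCODE_FPE  15  FP exception
     *
     * EXCCODE_MSAFPE (MSA FP exception), EXCCODE_MSADIS (MSA disabled)
     * and EXCCODE_GE (VZ guest exit) round out the handlers above.
     */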
1374 struct mips_coproc *cop0 = &vcpu->arch.cop0; in kvm_own_fpu()
1383 * FR=0 FPU state, and we don't want to hit reserved instruction in kvm_own_fpu()
1388 vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) in kvm_own_fpu()
1403 if (!(vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU)) { in kvm_own_fpu()
1404 __kvm_restore_fpu(&vcpu->arch); in kvm_own_fpu()
1405 vcpu->arch.aux_inuse |= KVM_MIPS_AUX_FPU; in kvm_own_fpu()
1418 struct mips_coproc *cop0 = &vcpu->arch.cop0; in kvm_own_msa()
1427 if (kvm_mips_guest_has_fpu(&vcpu->arch)) { in kvm_own_msa()
1435 (vcpu->arch.aux_inuse & (KVM_MIPS_AUX_FPU | in kvm_own_msa()
1450 switch (vcpu->arch.aux_inuse & (KVM_MIPS_AUX_FPU | KVM_MIPS_AUX_MSA)) { in kvm_own_msa()
1455 __kvm_restore_msa_upper(&vcpu->arch); in kvm_own_msa()
1456 vcpu->arch.aux_inuse |= KVM_MIPS_AUX_MSA; in kvm_own_msa()
1461 __kvm_restore_msa(&vcpu->arch); in kvm_own_msa()
1462 vcpu->arch.aux_inuse |= KVM_MIPS_AUX_MSA; in kvm_own_msa()
1463 if (kvm_mips_guest_has_fpu(&vcpu->arch)) in kvm_own_msa()
1464 vcpu->arch.aux_inuse |= KVM_MIPS_AUX_FPU; in kvm_own_msa()
1481 if (cpu_has_msa && vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) { in kvm_drop_fpu()
1484 vcpu->arch.aux_inuse &= ~KVM_MIPS_AUX_MSA; in kvm_drop_fpu()
1486 if (vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU) { in kvm_drop_fpu()
1489 vcpu->arch.aux_inuse &= ~KVM_MIPS_AUX_FPU; in kvm_drop_fpu()
1501 * This is why we explicitly re-enable the hardware before saving. in kvm_lose_fpu()
1505 if (cpu_has_msa && vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) { in kvm_lose_fpu()
1506 __kvm_save_msa(&vcpu->arch); in kvm_lose_fpu()
1511 if (vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU) { in kvm_lose_fpu()
1515 vcpu->arch.aux_inuse &= ~(KVM_MIPS_AUX_FPU | KVM_MIPS_AUX_MSA); in kvm_lose_fpu()
1516 } else if (vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU) { in kvm_lose_fpu()
1517 __kvm_save_fpu(&vcpu->arch); in kvm_lose_fpu()
1518 vcpu->arch.aux_inuse &= ~KVM_MIPS_AUX_FPU; in kvm_lose_fpu()
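The three functions above implement lazy FPU/MSA context ownership; a sketch of the state they track (flag names as used in the fragments, transitions inferred from the code):

    /*
     * vcpu->arch.aux_inuse bits:
     *   0                                    no guest FPU/MSA state live in hardware
     *   KVM_MIPS_AUX_FPU                     scalar FP registers live
     *   KVM_MIPS_AUX_FPU | KVM_MIPS_AUX_MSA  full 128-bit vector state live
     *   (KVM_MIPS_AUX_MSA alone can occur when the guest has MSA but no FPU)
     *
     * kvm_own_fpu()/kvm_own_msa() restore state lazily on first guest use,
     * kvm_drop_fpu() discards it without saving, and kvm_lose_fpu() saves it
     * back to the vcpu, re-enabling the unit first because disabled FPU/MSA
     * state cannot be read (the "re-enable the hardware before saving"
     * comment above).
     */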
1537 struct pt_regs *regs = args->regs; in kvm_mips_csr_die_notify()
1545 if (!(current->flags & PF_VCPU)) in kvm_mips_csr_die_notify()
1611 return -EOPNOTSUPP; in kvm_mips_init()