Lines matching refs:arch

52 	return !!(v->arch.pending_exceptions) || kvm_request_pending(v);  in kvm_arch_vcpu_runnable()
144 struct kvm_vcpu_arch_shared *shared = vcpu->arch.shared; in kvmppc_swab_shared()
186 if (vcpu->arch.intr_msr & MSR_LE) in kvmppc_kvm_pv()
188 if (shared_big_endian != vcpu->arch.shared_big_endian) in kvmppc_kvm_pv()
190 vcpu->arch.shared_big_endian = shared_big_endian; in kvmppc_kvm_pv()
199 vcpu->arch.disable_kernel_nx = true; in kvmppc_kvm_pv()
203 vcpu->arch.magic_page_pa = param1 & ~0xfffULL; in kvmppc_kvm_pv()
204 vcpu->arch.magic_page_ea = param2 & ~0xfffULL; in kvmppc_kvm_pv()
211 if ((vcpu->arch.magic_page_pa & 0xf000) != in kvmppc_kvm_pv()
212 ((ulong)vcpu->arch.shared & 0xf000)) { in kvmppc_kvm_pv()
213 void *old_shared = vcpu->arch.shared; in kvmppc_kvm_pv()
214 ulong shared = (ulong)vcpu->arch.shared; in kvmppc_kvm_pv()
218 shared |= vcpu->arch.magic_page_pa & 0xf000; in kvmppc_kvm_pv()
221 vcpu->arch.shared = new_shared; in kvmppc_kvm_pv()
258 if (!vcpu->arch.pvr) in kvmppc_sanity_check()
262 if ((vcpu->arch.cpu_type != KVM_CPU_3S_64) && vcpu->arch.papr_enabled) in kvmppc_sanity_check()
266 if (!vcpu->arch.papr_enabled && is_kvmppc_hv_enabled(vcpu->kvm)) in kvmppc_sanity_check()
277 vcpu->arch.sane = r; in kvmppc_sanity_check()
326 vcpu->arch.vaddr_accessed, dsisr); in kvmppc_emulate_mmio()
351 ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK; in kvmppc_st()
357 if (vcpu->kvm->arch.kvm_ops && vcpu->kvm->arch.kvm_ops->store_to_eaddr) in kvmppc_st()
358 r = vcpu->kvm->arch.kvm_ops->store_to_eaddr(vcpu, eaddr, ptr, in kvmppc_st()
378 void *magic = vcpu->arch.shared; in kvmppc_st()
394 ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK; in kvmppc_ld()
400 if (vcpu->kvm->arch.kvm_ops && vcpu->kvm->arch.kvm_ops->load_from_eaddr) in kvmppc_ld()
401 rc = vcpu->kvm->arch.kvm_ops->load_from_eaddr(vcpu, eaddr, ptr, in kvmppc_ld()
424 void *magic = vcpu->arch.shared; in kvmppc_ld()
469 kvm->arch.kvm_ops = kvm_ops; in kvm_arch_init_vm()
499 module_put(kvm->arch.kvm_ops->owner); in kvm_arch_destroy_vm()
595 if (kvm->arch.emul_smt_mode > 1) in kvm_vm_ioctl_check_extension()
596 r = kvm->arch.emul_smt_mode; in kvm_vm_ioctl_check_extension()
598 r = kvm->arch.smt_mode; in kvm_vm_ioctl_check_extension()
768 vcpu = container_of(timer, struct kvm_vcpu, arch.dec_timer); in kvmppc_decrementer_wakeup()
778 hrtimer_init(&vcpu->arch.dec_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS); in kvm_arch_vcpu_create()
779 vcpu->arch.dec_timer.function = kvmppc_decrementer_wakeup; in kvm_arch_vcpu_create()
782 mutex_init(&vcpu->arch.exit_timing_lock); in kvm_arch_vcpu_create()
792 rcuwait_init(&vcpu->arch.wait); in kvm_arch_vcpu_create()
793 vcpu->arch.waitp = &vcpu->arch.wait; in kvm_arch_vcpu_create()
808 hrtimer_cancel(&vcpu->arch.dec_timer); in kvm_arch_vcpu_destroy()
810 switch (vcpu->arch.irq_type) { in kvm_arch_vcpu_destroy()
812 kvmppc_mpic_disconnect_vcpu(vcpu->arch.mpic, vcpu); in kvm_arch_vcpu_destroy()
845 mtspr(SPRN_VRSAVE, vcpu->arch.vrsave); in kvm_arch_vcpu_load()
854 vcpu->arch.vrsave = mfspr(SPRN_VRSAVE); in kvm_arch_vcpu_put()
877 if (kvm->arch.kvm_ops->irq_bypass_add_producer) in kvm_arch_irq_bypass_add_producer()
878 return kvm->arch.kvm_ops->irq_bypass_add_producer(cons, prod); in kvm_arch_irq_bypass_add_producer()
890 if (kvm->arch.kvm_ops->irq_bypass_del_producer) in kvm_arch_irq_bypass_del_producer()
891 kvm->arch.kvm_ops->irq_bypass_del_producer(cons, prod); in kvm_arch_irq_bypass_del_producer()
930 int offset = kvmppc_get_vsr_dword_offset(vcpu->arch.mmio_vsx_offset); in kvmppc_set_vsr_dword()
931 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK; in kvmppc_set_vsr_dword()
949 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK; in kvmppc_set_vsr_dword_dump()
966 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK; in kvmppc_set_vsr_word_dump()
986 int offset = kvmppc_get_vsr_word_offset(vcpu->arch.mmio_vsx_offset); in kvmppc_set_vsr_word()
987 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK; in kvmppc_set_vsr_word()
1055 vcpu->arch.mmio_vmx_offset); in kvmppc_set_vmx_dword()
1056 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK; in kvmppc_set_vmx_dword()
1071 vcpu->arch.mmio_vmx_offset); in kvmppc_set_vmx_word()
1072 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK; in kvmppc_set_vmx_word()
1087 vcpu->arch.mmio_vmx_offset); in kvmppc_set_vmx_hword()
1088 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK; in kvmppc_set_vmx_hword()
1103 vcpu->arch.mmio_vmx_offset); in kvmppc_set_vmx_byte()
1104 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK; in kvmppc_set_vmx_byte()
1153 if (!vcpu->arch.mmio_host_swabbed) { in kvmppc_complete_mmio_load()
1170 if ((vcpu->arch.mmio_sp64_extend) && (run->mmio.len == 4)) in kvmppc_complete_mmio_load()
1173 if (vcpu->arch.mmio_sign_extend) { in kvmppc_complete_mmio_load()
1189 switch (vcpu->arch.io_gpr & KVM_MMIO_REG_EXT_MASK) { in kvmppc_complete_mmio_load()
1191 kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr); in kvmppc_complete_mmio_load()
1194 if (vcpu->kvm->arch.kvm_ops->giveup_ext) in kvmppc_complete_mmio_load()
1195 vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_FP); in kvmppc_complete_mmio_load()
1197 VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr; in kvmppc_complete_mmio_load()
1201 vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr; in kvmppc_complete_mmio_load()
1204 VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr; in kvmppc_complete_mmio_load()
1205 vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr; in kvmppc_complete_mmio_load()
1210 if (vcpu->kvm->arch.kvm_ops->giveup_ext) in kvmppc_complete_mmio_load()
1211 vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_VSX); in kvmppc_complete_mmio_load()
1213 if (vcpu->arch.mmio_copy_type == KVMPPC_VSX_COPY_DWORD) in kvmppc_complete_mmio_load()
1215 else if (vcpu->arch.mmio_copy_type == KVMPPC_VSX_COPY_WORD) in kvmppc_complete_mmio_load()
1217 else if (vcpu->arch.mmio_copy_type == in kvmppc_complete_mmio_load()
1220 else if (vcpu->arch.mmio_copy_type == in kvmppc_complete_mmio_load()
1227 if (vcpu->kvm->arch.kvm_ops->giveup_ext) in kvmppc_complete_mmio_load()
1228 vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_VEC); in kvmppc_complete_mmio_load()
1230 if (vcpu->arch.mmio_copy_type == KVMPPC_VMX_COPY_DWORD) in kvmppc_complete_mmio_load()
1232 else if (vcpu->arch.mmio_copy_type == KVMPPC_VMX_COPY_WORD) in kvmppc_complete_mmio_load()
1234 else if (vcpu->arch.mmio_copy_type == in kvmppc_complete_mmio_load()
1237 else if (vcpu->arch.mmio_copy_type == in kvmppc_complete_mmio_load()
1246 kvm_vcpu_write_guest(vcpu, vcpu->arch.nested_io_gpr, &gpr, in kvmppc_complete_mmio_load()
1273 run->mmio.phys_addr = vcpu->arch.paddr_accessed; in __kvmppc_handle_load()
1277 vcpu->arch.io_gpr = rt; in __kvmppc_handle_load()
1278 vcpu->arch.mmio_host_swabbed = host_swabbed; in __kvmppc_handle_load()
1281 vcpu->arch.mmio_sign_extend = sign_extend; in __kvmppc_handle_load()
1323 if (vcpu->arch.mmio_vsx_copy_nums > 4) in kvmppc_handle_vsx_load()
1326 while (vcpu->arch.mmio_vsx_copy_nums) { in kvmppc_handle_vsx_load()
1333 vcpu->arch.paddr_accessed += vcpu->run->mmio.len; in kvmppc_handle_vsx_load()
1335 vcpu->arch.mmio_vsx_copy_nums--; in kvmppc_handle_vsx_load()
1336 vcpu->arch.mmio_vsx_offset++; in kvmppc_handle_vsx_load()
1360 run->mmio.phys_addr = vcpu->arch.paddr_accessed; in kvmppc_handle_store()
1366 if ((vcpu->arch.mmio_sp64_extend) && (bytes == 4)) in kvmppc_handle_store()
1408 int copy_type = vcpu->arch.mmio_copy_type; in kvmppc_get_vsr_data()
1414 kvmppc_get_vsr_dword_offset(vcpu->arch.mmio_vsx_offset); in kvmppc_get_vsr_data()
1431 kvmppc_get_vsr_word_offset(vcpu->arch.mmio_vsx_offset); in kvmppc_get_vsr_data()
1463 vcpu->arch.io_gpr = rs; in kvmppc_handle_vsx_store()
1466 if (vcpu->arch.mmio_vsx_copy_nums > 4) in kvmppc_handle_vsx_store()
1469 while (vcpu->arch.mmio_vsx_copy_nums) { in kvmppc_handle_vsx_store()
1479 vcpu->arch.paddr_accessed += vcpu->run->mmio.len; in kvmppc_handle_vsx_store()
1481 vcpu->arch.mmio_vsx_copy_nums--; in kvmppc_handle_vsx_store()
1482 vcpu->arch.mmio_vsx_offset++; in kvmppc_handle_vsx_store()
1494 vcpu->arch.paddr_accessed += run->mmio.len; in kvmppc_emulate_mmio_vsx_loadstore()
1497 emulated = kvmppc_handle_vsx_load(vcpu, vcpu->arch.io_gpr, in kvmppc_emulate_mmio_vsx_loadstore()
1498 run->mmio.len, 1, vcpu->arch.mmio_sign_extend); in kvmppc_emulate_mmio_vsx_loadstore()
1501 vcpu->arch.io_gpr, run->mmio.len, 1); in kvmppc_emulate_mmio_vsx_loadstore()
1529 if (vcpu->arch.mmio_vmx_copy_nums > 2) in kvmppc_handle_vmx_load()
1532 while (vcpu->arch.mmio_vmx_copy_nums) { in kvmppc_handle_vmx_load()
1539 vcpu->arch.paddr_accessed += vcpu->run->mmio.len; in kvmppc_handle_vmx_load()
1540 vcpu->arch.mmio_vmx_copy_nums--; in kvmppc_handle_vmx_load()
1541 vcpu->arch.mmio_vmx_offset++; in kvmppc_handle_vmx_load()
1554 kvmppc_get_vmx_dword_offset(vcpu, vcpu->arch.mmio_vmx_offset); in kvmppc_get_vmx_dword()
1572 kvmppc_get_vmx_word_offset(vcpu, vcpu->arch.mmio_vmx_offset); in kvmppc_get_vmx_word()
1590 kvmppc_get_vmx_hword_offset(vcpu, vcpu->arch.mmio_vmx_offset); in kvmppc_get_vmx_hword()
1608 kvmppc_get_vmx_byte_offset(vcpu, vcpu->arch.mmio_vmx_offset); in kvmppc_get_vmx_byte()
1626 if (vcpu->arch.mmio_vmx_copy_nums > 2) in kvmppc_handle_vmx_store()
1629 vcpu->arch.io_gpr = rs; in kvmppc_handle_vmx_store()
1631 while (vcpu->arch.mmio_vmx_copy_nums) { in kvmppc_handle_vmx_store()
1632 switch (vcpu->arch.mmio_copy_type) { in kvmppc_handle_vmx_store()
1659 vcpu->arch.paddr_accessed += vcpu->run->mmio.len; in kvmppc_handle_vmx_store()
1660 vcpu->arch.mmio_vmx_copy_nums--; in kvmppc_handle_vmx_store()
1661 vcpu->arch.mmio_vmx_offset++; in kvmppc_handle_vmx_store()
1673 vcpu->arch.paddr_accessed += run->mmio.len; in kvmppc_emulate_mmio_vmx_loadstore()
1677 vcpu->arch.io_gpr, run->mmio.len, 1); in kvmppc_emulate_mmio_vmx_loadstore()
1680 vcpu->arch.io_gpr, run->mmio.len, 1); in kvmppc_emulate_mmio_vmx_loadstore()
1722 val.vval = vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0]; in kvm_vcpu_ioctl_get_one_reg()
1729 val = get_reg_val(reg->id, vcpu->arch.vr.vscr.u[3]); in kvm_vcpu_ioctl_get_one_reg()
1732 val = get_reg_val(reg->id, vcpu->arch.vrsave); in kvm_vcpu_ioctl_get_one_reg()
1773 vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0] = val.vval; in kvm_vcpu_ioctl_set_one_reg()
1780 vcpu->arch.vr.vscr.u[3] = set_reg_val(reg->id, val); in kvm_vcpu_ioctl_set_one_reg()
1787 vcpu->arch.vrsave = set_reg_val(reg->id, val); in kvm_vcpu_ioctl_set_one_reg()
1811 if (vcpu->arch.mmio_vsx_copy_nums > 0) { in kvm_arch_vcpu_ioctl_run()
1812 vcpu->arch.mmio_vsx_copy_nums--; in kvm_arch_vcpu_ioctl_run()
1813 vcpu->arch.mmio_vsx_offset++; in kvm_arch_vcpu_ioctl_run()
1816 if (vcpu->arch.mmio_vsx_copy_nums > 0) { in kvm_arch_vcpu_ioctl_run()
1825 if (vcpu->arch.mmio_vmx_copy_nums > 0) { in kvm_arch_vcpu_ioctl_run()
1826 vcpu->arch.mmio_vmx_copy_nums--; in kvm_arch_vcpu_ioctl_run()
1827 vcpu->arch.mmio_vmx_offset++; in kvm_arch_vcpu_ioctl_run()
1830 if (vcpu->arch.mmio_vmx_copy_nums > 0) { in kvm_arch_vcpu_ioctl_run()
1838 } else if (vcpu->arch.osi_needed) { in kvm_arch_vcpu_ioctl_run()
1844 vcpu->arch.osi_needed = 0; in kvm_arch_vcpu_ioctl_run()
1845 } else if (vcpu->arch.hcall_needed) { in kvm_arch_vcpu_ioctl_run()
1851 vcpu->arch.hcall_needed = 0; in kvm_arch_vcpu_ioctl_run()
1853 } else if (vcpu->arch.epr_needed) { in kvm_arch_vcpu_ioctl_run()
1855 vcpu->arch.epr_needed = 0; in kvm_arch_vcpu_ioctl_run()
1908 vcpu->arch.osi_enabled = true; in kvm_vcpu_ioctl_enable_cap()
1912 vcpu->arch.papr_enabled = true; in kvm_vcpu_ioctl_enable_cap()
1917 vcpu->arch.epr_flags |= KVMPPC_EPR_USER; in kvm_vcpu_ioctl_enable_cap()
1919 vcpu->arch.epr_flags &= ~KVMPPC_EPR_USER; in kvm_vcpu_ioctl_enable_cap()
1924 vcpu->arch.watchdog_enabled = true; in kvm_vcpu_ioctl_enable_cap()
2014 vcpu->kvm->arch.fwnmi_enabled = true; in kvm_vcpu_ioctl_enable_cap()
2031 if (kvm->arch.mpic) in kvm_arch_intc_initialized()
2035 if (kvm->arch.xics || kvm->arch.xive) in kvm_arch_intc_initialized()
2167 ret = ret || (kvm->arch.mpic != NULL); in kvm_arch_irqchip_in_kernel()
2170 ret = ret || (kvm->arch.xics != NULL); in kvm_arch_irqchip_in_kernel()
2171 ret = ret || (kvm->arch.xive != NULL); in kvm_arch_irqchip_in_kernel()
2210 set_bit(hcall / 4, kvm->arch.enabled_hcalls); in kvm_vm_ioctl_enable_cap()
2212 clear_bit(hcall / 4, kvm->arch.enabled_hcalls); in kvm_vm_ioctl_enable_cap()
2221 if (kvm->arch.kvm_ops->set_smt_mode) in kvm_vm_ioctl_enable_cap()
2222 r = kvm->arch.kvm_ops->set_smt_mode(kvm, mode, flags); in kvm_vm_ioctl_enable_cap()
2229 !kvm->arch.kvm_ops->enable_nested) in kvm_vm_ioctl_enable_cap()
2231 r = kvm->arch.kvm_ops->enable_nested(kvm); in kvm_vm_ioctl_enable_cap()
2237 if (!is_kvmppc_hv_enabled(kvm) || !kvm->arch.kvm_ops->enable_svm) in kvm_vm_ioctl_enable_cap()
2239 r = kvm->arch.kvm_ops->enable_svm(kvm); in kvm_vm_ioctl_enable_cap()
2243 if (!is_kvmppc_hv_enabled(kvm) || !kvm->arch.kvm_ops->enable_dawr1) in kvm_vm_ioctl_enable_cap()
2245 r = kvm->arch.kvm_ops->enable_dawr1(kvm); in kvm_vm_ioctl_enable_cap()
2440 r = kvm->arch.kvm_ops->get_smmu_info(kvm, &info); in kvm_arch_vm_ioctl()
2456 if (!kvm->arch.kvm_ops->configure_mmu) in kvm_arch_vm_ioctl()
2461 r = kvm->arch.kvm_ops->configure_mmu(kvm, &cfg); in kvm_arch_vm_ioctl()
2469 if (!kvm->arch.kvm_ops->get_rmmu_info) in kvm_arch_vm_ioctl()
2471 r = kvm->arch.kvm_ops->get_rmmu_info(kvm, &info); in kvm_arch_vm_ioctl()
2488 if (!kvm->arch.kvm_ops->svm_off) in kvm_arch_vm_ioctl()
2491 r = kvm->arch.kvm_ops->svm_off(kvm); in kvm_arch_vm_ioctl()
2496 r = kvm->arch.kvm_ops->arch_vm_ioctl(filp, ioctl, arg); in kvm_arch_vm_ioctl()
2545 if (vcpu->kvm->arch.kvm_ops->create_vcpu_debugfs) in kvm_arch_create_vcpu_debugfs()
2546 vcpu->kvm->arch.kvm_ops->create_vcpu_debugfs(vcpu, debugfs_dentry); in kvm_arch_create_vcpu_debugfs()
2551 if (kvm->arch.kvm_ops->create_vm_debugfs) in kvm_arch_create_vm_debugfs()
2552 kvm->arch.kvm_ops->create_vm_debugfs(kvm); in kvm_arch_create_vm_debugfs()