Lines matching full:vcpu (arch/powerpc/kvm/powerpc.c)

55 bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu)  in kvm_arch_dy_runnable()  argument
57 return kvm_arch_vcpu_runnable(vcpu); in kvm_arch_dy_runnable()
60 bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_in_kernel() argument
65 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_should_kick() argument
79 int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu) in kvmppc_prepare_to_enter() argument
95 kvmppc_account_exit(vcpu, SIGNAL_EXITS); in kvmppc_prepare_to_enter()
96 vcpu->run->exit_reason = KVM_EXIT_INTR; in kvmppc_prepare_to_enter()
101 vcpu->mode = IN_GUEST_MODE; in kvmppc_prepare_to_enter()
104 * Reading vcpu->requests must happen after setting vcpu->mode, in kvmppc_prepare_to_enter()
109 * to the page tables done while the VCPU is running. in kvmppc_prepare_to_enter()
114 if (kvm_request_pending(vcpu)) { in kvmppc_prepare_to_enter()
117 trace_kvm_check_requests(vcpu); in kvmppc_prepare_to_enter()
118 r = kvmppc_core_check_requests(vcpu); in kvmppc_prepare_to_enter()
125 if (kvmppc_core_prepare_to_enter(vcpu)) { in kvmppc_prepare_to_enter()
142 static void kvmppc_swab_shared(struct kvm_vcpu *vcpu) in kvmppc_swab_shared() argument
144 struct kvm_vcpu_arch_shared *shared = vcpu->arch.shared; in kvmppc_swab_shared()
162 int kvmppc_kvm_pv(struct kvm_vcpu *vcpu) in kvmppc_kvm_pv() argument
164 int nr = kvmppc_get_gpr(vcpu, 11); in kvmppc_kvm_pv()
166 unsigned long __maybe_unused param1 = kvmppc_get_gpr(vcpu, 3); in kvmppc_kvm_pv()
167 unsigned long __maybe_unused param2 = kvmppc_get_gpr(vcpu, 4); in kvmppc_kvm_pv()
168 unsigned long __maybe_unused param3 = kvmppc_get_gpr(vcpu, 5); in kvmppc_kvm_pv()
169 unsigned long __maybe_unused param4 = kvmppc_get_gpr(vcpu, 6); in kvmppc_kvm_pv()
172 if (!(kvmppc_get_msr(vcpu) & MSR_SF)) { in kvmppc_kvm_pv()
186 if (vcpu->arch.intr_msr & MSR_LE) in kvmppc_kvm_pv()
188 if (shared_big_endian != vcpu->arch.shared_big_endian) in kvmppc_kvm_pv()
189 kvmppc_swab_shared(vcpu); in kvmppc_kvm_pv()
190 vcpu->arch.shared_big_endian = shared_big_endian; in kvmppc_kvm_pv()
199 vcpu->arch.disable_kernel_nx = true; in kvmppc_kvm_pv()
200 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); in kvmppc_kvm_pv()
203 vcpu->arch.magic_page_pa = param1 & ~0xfffULL; in kvmppc_kvm_pv()
204 vcpu->arch.magic_page_ea = param2 & ~0xfffULL; in kvmppc_kvm_pv()
211 if ((vcpu->arch.magic_page_pa & 0xf000) != in kvmppc_kvm_pv()
212 ((ulong)vcpu->arch.shared & 0xf000)) { in kvmppc_kvm_pv()
213 void *old_shared = vcpu->arch.shared; in kvmppc_kvm_pv()
214 ulong shared = (ulong)vcpu->arch.shared; in kvmppc_kvm_pv()
218 shared |= vcpu->arch.magic_page_pa & 0xf000; in kvmppc_kvm_pv()
221 vcpu->arch.shared = new_shared; in kvmppc_kvm_pv()
240 kvm_vcpu_halt(vcpu); in kvmppc_kvm_pv()
247 kvmppc_set_gpr(vcpu, 4, r2); in kvmppc_kvm_pv()
253 int kvmppc_sanity_check(struct kvm_vcpu *vcpu) in kvmppc_sanity_check() argument
258 if (!vcpu->arch.pvr) in kvmppc_sanity_check()
262 if ((vcpu->arch.cpu_type != KVM_CPU_3S_64) && vcpu->arch.papr_enabled) in kvmppc_sanity_check()
266 if (!vcpu->arch.papr_enabled && is_kvmppc_hv_enabled(vcpu->kvm)) in kvmppc_sanity_check()
277 vcpu->arch.sane = r; in kvmppc_sanity_check()
282 int kvmppc_emulate_mmio(struct kvm_vcpu *vcpu) in kvmppc_emulate_mmio() argument
287 er = kvmppc_emulate_loadstore(vcpu); in kvmppc_emulate_mmio()
298 vcpu->run->exit_reason = KVM_EXIT_MMIO; in kvmppc_emulate_mmio()
309 kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst); in kvmppc_emulate_mmio()
321 if (vcpu->mmio_is_write) in kvmppc_emulate_mmio()
324 kvmppc_core_queue_data_storage(vcpu, in kvmppc_emulate_mmio()
325 kvmppc_get_msr(vcpu) & SRR1_PREFIXED, in kvmppc_emulate_mmio()
326 vcpu->arch.vaddr_accessed, dsisr); in kvmppc_emulate_mmio()
333 kvmppc_core_queue_program(vcpu, 0); in kvmppc_emulate_mmio()
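
The kvmppc_emulate_mmio() lines above cover the case where in-kernel emulation cannot complete the access: the run struct is filled in, exit_reason is set to KVM_EXIT_MMIO, and control returns to userspace. A minimal sketch of the matching userspace side follows (generic KVM API; mmio_loop(), device_read() and device_write() are hypothetical placeholders, and run is the kvm_run area mmap()ed from the vCPU fd):

#include <linux/kvm.h>
#include <sys/ioctl.h>

/* Hypothetical device-model hooks, not part of any real API. */
extern void device_read(__u64 addr, void *data, __u32 len);
extern void device_write(__u64 addr, const void *data, __u32 len);

/* run points at the kvm_run area mmap()ed from the vCPU fd. */
static void mmio_loop(int vcpu_fd, struct kvm_run *run)
{
        for (;;) {
                if (ioctl(vcpu_fd, KVM_RUN, 0) < 0)
                        break;
                if (run->exit_reason != KVM_EXIT_MMIO)
                        continue;
                if (run->mmio.is_write)
                        device_write(run->mmio.phys_addr, run->mmio.data, run->mmio.len);
                else
                        device_read(run->mmio.phys_addr, run->mmio.data, run->mmio.len);
        }
}

On the next KVM_RUN, kvm_arch_vcpu_ioctl_run() (further down in this listing) sees mmio_needed and calls kvmppc_complete_mmio_load() to move run->mmio.data into the target register.
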
348 int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr, in kvmppc_st() argument
351 ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK; in kvmppc_st()
355 vcpu->stat.st++; in kvmppc_st()
357 if (vcpu->kvm->arch.kvm_ops && vcpu->kvm->arch.kvm_ops->store_to_eaddr) in kvmppc_st()
358 r = vcpu->kvm->arch.kvm_ops->store_to_eaddr(vcpu, eaddr, ptr, in kvmppc_st()
364 r = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST, in kvmppc_st()
375 if (kvmppc_supports_magic_page(vcpu) && mp_pa && in kvmppc_st()
377 !(kvmppc_get_msr(vcpu) & MSR_PR)) { in kvmppc_st()
378 void *magic = vcpu->arch.shared; in kvmppc_st()
384 if (kvm_write_guest(vcpu->kvm, pte.raddr, ptr, size)) in kvmppc_st()
391 int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr, in kvmppc_ld() argument
394 ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK; in kvmppc_ld()
398 vcpu->stat.ld++; in kvmppc_ld()
400 if (vcpu->kvm->arch.kvm_ops && vcpu->kvm->arch.kvm_ops->load_from_eaddr) in kvmppc_ld()
401 rc = vcpu->kvm->arch.kvm_ops->load_from_eaddr(vcpu, eaddr, ptr, in kvmppc_ld()
407 rc = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST, in kvmppc_ld()
421 if (kvmppc_supports_magic_page(vcpu) && mp_pa && in kvmppc_ld()
423 !(kvmppc_get_msr(vcpu) & MSR_PR)) { in kvmppc_ld()
424 void *magic = vcpu->arch.shared; in kvmppc_ld()
430 kvm_vcpu_srcu_read_lock(vcpu); in kvmppc_ld()
431 rc = kvm_read_guest(vcpu->kvm, pte.raddr, ptr, size); in kvmppc_ld()
432 kvm_vcpu_srcu_read_unlock(vcpu); in kvmppc_ld()
766 struct kvm_vcpu *vcpu; in kvmppc_decrementer_wakeup() local
768 vcpu = container_of(timer, struct kvm_vcpu, arch.dec_timer); in kvmppc_decrementer_wakeup()
769 kvmppc_decrementer_func(vcpu); in kvmppc_decrementer_wakeup()
774 int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_create() argument
778 hrtimer_init(&vcpu->arch.dec_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS); in kvm_arch_vcpu_create()
779 vcpu->arch.dec_timer.function = kvmppc_decrementer_wakeup; in kvm_arch_vcpu_create()
782 mutex_init(&vcpu->arch.exit_timing_lock); in kvm_arch_vcpu_create()
784 err = kvmppc_subarch_vcpu_init(vcpu); in kvm_arch_vcpu_create()
788 err = kvmppc_core_vcpu_create(vcpu); in kvm_arch_vcpu_create()
792 rcuwait_init(&vcpu->arch.wait); in kvm_arch_vcpu_create()
793 vcpu->arch.waitp = &vcpu->arch.wait; in kvm_arch_vcpu_create()
797 kvmppc_subarch_vcpu_uninit(vcpu); in kvm_arch_vcpu_create()
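
For context, a sketch of the userspace steps that reach kvm_arch_vcpu_create() and map the kvm_run area used by the exit handling elsewhere in this file; setup_vcpu() is a hypothetical helper and error handling is omitted:

#include <linux/kvm.h>
#include <sys/ioctl.h>
#include <sys/mman.h>

/* kvm_fd is the /dev/kvm fd, vm_fd a VM fd created with KVM_CREATE_VM. */
static struct kvm_run *setup_vcpu(int kvm_fd, int vm_fd, unsigned long id, int *vcpu_fd)
{
        long sz;

        *vcpu_fd = ioctl(vm_fd, KVM_CREATE_VCPU, id);   /* lands in kvm_arch_vcpu_create() */
        sz = ioctl(kvm_fd, KVM_GET_VCPU_MMAP_SIZE, 0);  /* size of the shared kvm_run area */
        return mmap(NULL, sz, PROT_READ | PROT_WRITE, MAP_SHARED, *vcpu_fd, 0);
}
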
801 void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_postcreate() argument
805 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_destroy() argument
807 /* Make sure we're not using the vcpu anymore */ in kvm_arch_vcpu_destroy()
808 hrtimer_cancel(&vcpu->arch.dec_timer); in kvm_arch_vcpu_destroy()
810 switch (vcpu->arch.irq_type) { in kvm_arch_vcpu_destroy()
812 kvmppc_mpic_disconnect_vcpu(vcpu->arch.mpic, vcpu); in kvm_arch_vcpu_destroy()
816 kvmppc_xive_cleanup_vcpu(vcpu); in kvm_arch_vcpu_destroy()
818 kvmppc_xics_free_icp(vcpu); in kvm_arch_vcpu_destroy()
821 kvmppc_xive_native_cleanup_vcpu(vcpu); in kvm_arch_vcpu_destroy()
825 kvmppc_core_vcpu_free(vcpu); in kvm_arch_vcpu_destroy()
827 kvmppc_subarch_vcpu_uninit(vcpu); in kvm_arch_vcpu_destroy()
830 int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu) in kvm_cpu_has_pending_timer() argument
832 return kvmppc_core_pending_dec(vcpu); in kvm_cpu_has_pending_timer()
835 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) in kvm_arch_vcpu_load() argument
845 mtspr(SPRN_VRSAVE, vcpu->arch.vrsave); in kvm_arch_vcpu_load()
847 kvmppc_core_vcpu_load(vcpu, cpu); in kvm_arch_vcpu_load()
850 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_put() argument
852 kvmppc_core_vcpu_put(vcpu); in kvm_arch_vcpu_put()
854 vcpu->arch.vrsave = mfspr(SPRN_VRSAVE); in kvm_arch_vcpu_put()
926 static inline void kvmppc_set_vsr_dword(struct kvm_vcpu *vcpu, in kvmppc_set_vsr_dword() argument
930 int offset = kvmppc_get_vsr_dword_offset(vcpu->arch.mmio_vsx_offset); in kvmppc_set_vsr_dword()
931 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK; in kvmppc_set_vsr_dword()
937 val.vval = VCPU_VSX_VR(vcpu, index - 32); in kvmppc_set_vsr_dword()
939 VCPU_VSX_VR(vcpu, index - 32) = val.vval; in kvmppc_set_vsr_dword()
941 VCPU_VSX_FPR(vcpu, index, offset) = gpr; in kvmppc_set_vsr_dword()
945 static inline void kvmppc_set_vsr_dword_dump(struct kvm_vcpu *vcpu, in kvmppc_set_vsr_dword_dump() argument
949 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK; in kvmppc_set_vsr_dword_dump()
952 val.vval = VCPU_VSX_VR(vcpu, index - 32); in kvmppc_set_vsr_dword_dump()
955 VCPU_VSX_VR(vcpu, index - 32) = val.vval; in kvmppc_set_vsr_dword_dump()
957 VCPU_VSX_FPR(vcpu, index, 0) = gpr; in kvmppc_set_vsr_dword_dump()
958 VCPU_VSX_FPR(vcpu, index, 1) = gpr; in kvmppc_set_vsr_dword_dump()
962 static inline void kvmppc_set_vsr_word_dump(struct kvm_vcpu *vcpu, in kvmppc_set_vsr_word_dump() argument
966 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK; in kvmppc_set_vsr_word_dump()
973 VCPU_VSX_VR(vcpu, index - 32) = val.vval; in kvmppc_set_vsr_word_dump()
977 VCPU_VSX_FPR(vcpu, index, 0) = val.vsxval[0]; in kvmppc_set_vsr_word_dump()
978 VCPU_VSX_FPR(vcpu, index, 1) = val.vsxval[0]; in kvmppc_set_vsr_word_dump()
982 static inline void kvmppc_set_vsr_word(struct kvm_vcpu *vcpu, in kvmppc_set_vsr_word() argument
986 int offset = kvmppc_get_vsr_word_offset(vcpu->arch.mmio_vsx_offset); in kvmppc_set_vsr_word()
987 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK; in kvmppc_set_vsr_word()
994 val.vval = VCPU_VSX_VR(vcpu, index - 32); in kvmppc_set_vsr_word()
996 VCPU_VSX_VR(vcpu, index - 32) = val.vval; in kvmppc_set_vsr_word()
1000 val.vsxval[0] = VCPU_VSX_FPR(vcpu, index, dword_offset); in kvmppc_set_vsr_word()
1002 VCPU_VSX_FPR(vcpu, index, dword_offset) = val.vsxval[0]; in kvmppc_set_vsr_word()
1008 static inline int kvmppc_get_vmx_offset_generic(struct kvm_vcpu *vcpu, in kvmppc_get_vmx_offset_generic() argument
1017 if (kvmppc_need_byteswap(vcpu)) in kvmppc_get_vmx_offset_generic()
1025 static inline int kvmppc_get_vmx_dword_offset(struct kvm_vcpu *vcpu, in kvmppc_get_vmx_dword_offset() argument
1028 return kvmppc_get_vmx_offset_generic(vcpu, index, 8); in kvmppc_get_vmx_dword_offset()
1031 static inline int kvmppc_get_vmx_word_offset(struct kvm_vcpu *vcpu, in kvmppc_get_vmx_word_offset() argument
1034 return kvmppc_get_vmx_offset_generic(vcpu, index, 4); in kvmppc_get_vmx_word_offset()
1037 static inline int kvmppc_get_vmx_hword_offset(struct kvm_vcpu *vcpu, in kvmppc_get_vmx_hword_offset() argument
1040 return kvmppc_get_vmx_offset_generic(vcpu, index, 2); in kvmppc_get_vmx_hword_offset()
1043 static inline int kvmppc_get_vmx_byte_offset(struct kvm_vcpu *vcpu, in kvmppc_get_vmx_byte_offset() argument
1046 return kvmppc_get_vmx_offset_generic(vcpu, index, 1); in kvmppc_get_vmx_byte_offset()
1050 static inline void kvmppc_set_vmx_dword(struct kvm_vcpu *vcpu, in kvmppc_set_vmx_dword() argument
1054 int offset = kvmppc_get_vmx_dword_offset(vcpu, in kvmppc_set_vmx_dword()
1055 vcpu->arch.mmio_vmx_offset); in kvmppc_set_vmx_dword()
1056 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK; in kvmppc_set_vmx_dword()
1061 val.vval = VCPU_VSX_VR(vcpu, index); in kvmppc_set_vmx_dword()
1063 VCPU_VSX_VR(vcpu, index) = val.vval; in kvmppc_set_vmx_dword()
1066 static inline void kvmppc_set_vmx_word(struct kvm_vcpu *vcpu, in kvmppc_set_vmx_word() argument
1070 int offset = kvmppc_get_vmx_word_offset(vcpu, in kvmppc_set_vmx_word()
1071 vcpu->arch.mmio_vmx_offset); in kvmppc_set_vmx_word()
1072 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK; in kvmppc_set_vmx_word()
1077 val.vval = VCPU_VSX_VR(vcpu, index); in kvmppc_set_vmx_word()
1079 VCPU_VSX_VR(vcpu, index) = val.vval; in kvmppc_set_vmx_word()
1082 static inline void kvmppc_set_vmx_hword(struct kvm_vcpu *vcpu, in kvmppc_set_vmx_hword() argument
1086 int offset = kvmppc_get_vmx_hword_offset(vcpu, in kvmppc_set_vmx_hword()
1087 vcpu->arch.mmio_vmx_offset); in kvmppc_set_vmx_hword()
1088 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK; in kvmppc_set_vmx_hword()
1093 val.vval = VCPU_VSX_VR(vcpu, index); in kvmppc_set_vmx_hword()
1095 VCPU_VSX_VR(vcpu, index) = val.vval; in kvmppc_set_vmx_hword()
1098 static inline void kvmppc_set_vmx_byte(struct kvm_vcpu *vcpu, in kvmppc_set_vmx_byte() argument
1102 int offset = kvmppc_get_vmx_byte_offset(vcpu, in kvmppc_set_vmx_byte()
1103 vcpu->arch.mmio_vmx_offset); in kvmppc_set_vmx_byte()
1104 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK; in kvmppc_set_vmx_byte()
1109 val.vval = VCPU_VSX_VR(vcpu, index); in kvmppc_set_vmx_byte()
1111 VCPU_VSX_VR(vcpu, index) = val.vval; in kvmppc_set_vmx_byte()
1145 static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu) in kvmppc_complete_mmio_load() argument
1147 struct kvm_run *run = vcpu->run; in kvmppc_complete_mmio_load()
1153 if (!vcpu->arch.mmio_host_swabbed) { in kvmppc_complete_mmio_load()
1170 if ((vcpu->arch.mmio_sp64_extend) && (run->mmio.len == 4)) in kvmppc_complete_mmio_load()
1173 if (vcpu->arch.mmio_sign_extend) { in kvmppc_complete_mmio_load()
1189 switch (vcpu->arch.io_gpr & KVM_MMIO_REG_EXT_MASK) { in kvmppc_complete_mmio_load()
1191 kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr); in kvmppc_complete_mmio_load()
1194 if (vcpu->kvm->arch.kvm_ops->giveup_ext) in kvmppc_complete_mmio_load()
1195 vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_FP); in kvmppc_complete_mmio_load()
1197 VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr; in kvmppc_complete_mmio_load()
1201 vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr; in kvmppc_complete_mmio_load()
1204 VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr; in kvmppc_complete_mmio_load()
1205 vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr; in kvmppc_complete_mmio_load()
1210 if (vcpu->kvm->arch.kvm_ops->giveup_ext) in kvmppc_complete_mmio_load()
1211 vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_VSX); in kvmppc_complete_mmio_load()
1213 if (vcpu->arch.mmio_copy_type == KVMPPC_VSX_COPY_DWORD) in kvmppc_complete_mmio_load()
1214 kvmppc_set_vsr_dword(vcpu, gpr); in kvmppc_complete_mmio_load()
1215 else if (vcpu->arch.mmio_copy_type == KVMPPC_VSX_COPY_WORD) in kvmppc_complete_mmio_load()
1216 kvmppc_set_vsr_word(vcpu, gpr); in kvmppc_complete_mmio_load()
1217 else if (vcpu->arch.mmio_copy_type == in kvmppc_complete_mmio_load()
1219 kvmppc_set_vsr_dword_dump(vcpu, gpr); in kvmppc_complete_mmio_load()
1220 else if (vcpu->arch.mmio_copy_type == in kvmppc_complete_mmio_load()
1222 kvmppc_set_vsr_word_dump(vcpu, gpr); in kvmppc_complete_mmio_load()
1227 if (vcpu->kvm->arch.kvm_ops->giveup_ext) in kvmppc_complete_mmio_load()
1228 vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_VEC); in kvmppc_complete_mmio_load()
1230 if (vcpu->arch.mmio_copy_type == KVMPPC_VMX_COPY_DWORD) in kvmppc_complete_mmio_load()
1231 kvmppc_set_vmx_dword(vcpu, gpr); in kvmppc_complete_mmio_load()
1232 else if (vcpu->arch.mmio_copy_type == KVMPPC_VMX_COPY_WORD) in kvmppc_complete_mmio_load()
1233 kvmppc_set_vmx_word(vcpu, gpr); in kvmppc_complete_mmio_load()
1234 else if (vcpu->arch.mmio_copy_type == in kvmppc_complete_mmio_load()
1236 kvmppc_set_vmx_hword(vcpu, gpr); in kvmppc_complete_mmio_load()
1237 else if (vcpu->arch.mmio_copy_type == in kvmppc_complete_mmio_load()
1239 kvmppc_set_vmx_byte(vcpu, gpr); in kvmppc_complete_mmio_load()
1244 if (kvmppc_need_byteswap(vcpu)) in kvmppc_complete_mmio_load()
1246 kvm_vcpu_write_guest(vcpu, vcpu->arch.nested_io_gpr, &gpr, in kvmppc_complete_mmio_load()
1255 static int __kvmppc_handle_load(struct kvm_vcpu *vcpu, in __kvmppc_handle_load() argument
1259 struct kvm_run *run = vcpu->run; in __kvmppc_handle_load()
1264 if (kvmppc_need_byteswap(vcpu)) { in __kvmppc_handle_load()
1273 run->mmio.phys_addr = vcpu->arch.paddr_accessed; in __kvmppc_handle_load()
1277 vcpu->arch.io_gpr = rt; in __kvmppc_handle_load()
1278 vcpu->arch.mmio_host_swabbed = host_swabbed; in __kvmppc_handle_load()
1279 vcpu->mmio_needed = 1; in __kvmppc_handle_load()
1280 vcpu->mmio_is_write = 0; in __kvmppc_handle_load()
1281 vcpu->arch.mmio_sign_extend = sign_extend; in __kvmppc_handle_load()
1283 idx = srcu_read_lock(&vcpu->kvm->srcu); in __kvmppc_handle_load()
1285 ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr, in __kvmppc_handle_load()
1288 srcu_read_unlock(&vcpu->kvm->srcu, idx); in __kvmppc_handle_load()
1291 kvmppc_complete_mmio_load(vcpu); in __kvmppc_handle_load()
1292 vcpu->mmio_needed = 0; in __kvmppc_handle_load()
1299 int kvmppc_handle_load(struct kvm_vcpu *vcpu, in kvmppc_handle_load() argument
1303 return __kvmppc_handle_load(vcpu, rt, bytes, is_default_endian, 0); in kvmppc_handle_load()
1308 int kvmppc_handle_loads(struct kvm_vcpu *vcpu, in kvmppc_handle_loads() argument
1312 return __kvmppc_handle_load(vcpu, rt, bytes, is_default_endian, 1); in kvmppc_handle_loads()
1316 int kvmppc_handle_vsx_load(struct kvm_vcpu *vcpu, in kvmppc_handle_vsx_load() argument
1323 if (vcpu->arch.mmio_vsx_copy_nums > 4) in kvmppc_handle_vsx_load()
1326 while (vcpu->arch.mmio_vsx_copy_nums) { in kvmppc_handle_vsx_load()
1327 emulated = __kvmppc_handle_load(vcpu, rt, bytes, in kvmppc_handle_vsx_load()
1333 vcpu->arch.paddr_accessed += vcpu->run->mmio.len; in kvmppc_handle_vsx_load()
1335 vcpu->arch.mmio_vsx_copy_nums--; in kvmppc_handle_vsx_load()
1336 vcpu->arch.mmio_vsx_offset++; in kvmppc_handle_vsx_load()
1342 int kvmppc_handle_store(struct kvm_vcpu *vcpu, in kvmppc_handle_store() argument
1345 struct kvm_run *run = vcpu->run; in kvmppc_handle_store()
1351 if (kvmppc_need_byteswap(vcpu)) { in kvmppc_handle_store()
1360 run->mmio.phys_addr = vcpu->arch.paddr_accessed; in kvmppc_handle_store()
1363 vcpu->mmio_needed = 1; in kvmppc_handle_store()
1364 vcpu->mmio_is_write = 1; in kvmppc_handle_store()
1366 if ((vcpu->arch.mmio_sp64_extend) && (bytes == 4)) in kvmppc_handle_store()
1386 idx = srcu_read_lock(&vcpu->kvm->srcu); in kvmppc_handle_store()
1388 ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr, in kvmppc_handle_store()
1391 srcu_read_unlock(&vcpu->kvm->srcu, idx); in kvmppc_handle_store()
1394 vcpu->mmio_needed = 0; in kvmppc_handle_store()
1403 static inline int kvmppc_get_vsr_data(struct kvm_vcpu *vcpu, int rs, u64 *val) in kvmppc_get_vsr_data() argument
1408 int copy_type = vcpu->arch.mmio_copy_type; in kvmppc_get_vsr_data()
1414 kvmppc_get_vsr_dword_offset(vcpu->arch.mmio_vsx_offset); in kvmppc_get_vsr_data()
1422 *val = VCPU_VSX_FPR(vcpu, rs, vsx_offset); in kvmppc_get_vsr_data()
1424 reg.vval = VCPU_VSX_VR(vcpu, rs - 32); in kvmppc_get_vsr_data()
1431 kvmppc_get_vsr_word_offset(vcpu->arch.mmio_vsx_offset); in kvmppc_get_vsr_data()
1441 reg.vsxval[0] = VCPU_VSX_FPR(vcpu, rs, dword_offset); in kvmppc_get_vsr_data()
1444 reg.vval = VCPU_VSX_VR(vcpu, rs - 32); in kvmppc_get_vsr_data()
1457 int kvmppc_handle_vsx_store(struct kvm_vcpu *vcpu, in kvmppc_handle_vsx_store() argument
1463 vcpu->arch.io_gpr = rs; in kvmppc_handle_vsx_store()
1466 if (vcpu->arch.mmio_vsx_copy_nums > 4) in kvmppc_handle_vsx_store()
1469 while (vcpu->arch.mmio_vsx_copy_nums) { in kvmppc_handle_vsx_store()
1470 if (kvmppc_get_vsr_data(vcpu, rs, &val) == -1) in kvmppc_handle_vsx_store()
1473 emulated = kvmppc_handle_store(vcpu, in kvmppc_handle_vsx_store()
1479 vcpu->arch.paddr_accessed += vcpu->run->mmio.len; in kvmppc_handle_vsx_store()
1481 vcpu->arch.mmio_vsx_copy_nums--; in kvmppc_handle_vsx_store()
1482 vcpu->arch.mmio_vsx_offset++; in kvmppc_handle_vsx_store()
1488 static int kvmppc_emulate_mmio_vsx_loadstore(struct kvm_vcpu *vcpu) in kvmppc_emulate_mmio_vsx_loadstore() argument
1490 struct kvm_run *run = vcpu->run; in kvmppc_emulate_mmio_vsx_loadstore()
1494 vcpu->arch.paddr_accessed += run->mmio.len; in kvmppc_emulate_mmio_vsx_loadstore()
1496 if (!vcpu->mmio_is_write) { in kvmppc_emulate_mmio_vsx_loadstore()
1497 emulated = kvmppc_handle_vsx_load(vcpu, vcpu->arch.io_gpr, in kvmppc_emulate_mmio_vsx_loadstore()
1498 run->mmio.len, 1, vcpu->arch.mmio_sign_extend); in kvmppc_emulate_mmio_vsx_loadstore()
1500 emulated = kvmppc_handle_vsx_store(vcpu, in kvmppc_emulate_mmio_vsx_loadstore()
1501 vcpu->arch.io_gpr, run->mmio.len, 1); in kvmppc_emulate_mmio_vsx_loadstore()
1524 int kvmppc_handle_vmx_load(struct kvm_vcpu *vcpu, in kvmppc_handle_vmx_load() argument
1529 if (vcpu->arch.mmio_vmx_copy_nums > 2) in kvmppc_handle_vmx_load()
1532 while (vcpu->arch.mmio_vmx_copy_nums) { in kvmppc_handle_vmx_load()
1533 emulated = __kvmppc_handle_load(vcpu, rt, bytes, in kvmppc_handle_vmx_load()
1539 vcpu->arch.paddr_accessed += vcpu->run->mmio.len; in kvmppc_handle_vmx_load()
1540 vcpu->arch.mmio_vmx_copy_nums--; in kvmppc_handle_vmx_load()
1541 vcpu->arch.mmio_vmx_offset++; in kvmppc_handle_vmx_load()
1547 static int kvmppc_get_vmx_dword(struct kvm_vcpu *vcpu, int index, u64 *val) in kvmppc_get_vmx_dword() argument
1554 kvmppc_get_vmx_dword_offset(vcpu, vcpu->arch.mmio_vmx_offset); in kvmppc_get_vmx_dword()
1559 reg.vval = VCPU_VSX_VR(vcpu, index); in kvmppc_get_vmx_dword()
1565 static int kvmppc_get_vmx_word(struct kvm_vcpu *vcpu, int index, u64 *val) in kvmppc_get_vmx_word() argument
1572 kvmppc_get_vmx_word_offset(vcpu, vcpu->arch.mmio_vmx_offset); in kvmppc_get_vmx_word()
1577 reg.vval = VCPU_VSX_VR(vcpu, index); in kvmppc_get_vmx_word()
1583 static int kvmppc_get_vmx_hword(struct kvm_vcpu *vcpu, int index, u64 *val) in kvmppc_get_vmx_hword() argument
1590 kvmppc_get_vmx_hword_offset(vcpu, vcpu->arch.mmio_vmx_offset); in kvmppc_get_vmx_hword()
1595 reg.vval = VCPU_VSX_VR(vcpu, index); in kvmppc_get_vmx_hword()
1601 static int kvmppc_get_vmx_byte(struct kvm_vcpu *vcpu, int index, u64 *val) in kvmppc_get_vmx_byte() argument
1608 kvmppc_get_vmx_byte_offset(vcpu, vcpu->arch.mmio_vmx_offset); in kvmppc_get_vmx_byte()
1613 reg.vval = VCPU_VSX_VR(vcpu, index); in kvmppc_get_vmx_byte()
1619 int kvmppc_handle_vmx_store(struct kvm_vcpu *vcpu, in kvmppc_handle_vmx_store() argument
1626 if (vcpu->arch.mmio_vmx_copy_nums > 2) in kvmppc_handle_vmx_store()
1629 vcpu->arch.io_gpr = rs; in kvmppc_handle_vmx_store()
1631 while (vcpu->arch.mmio_vmx_copy_nums) { in kvmppc_handle_vmx_store()
1632 switch (vcpu->arch.mmio_copy_type) { in kvmppc_handle_vmx_store()
1634 if (kvmppc_get_vmx_dword(vcpu, index, &val) == -1) in kvmppc_handle_vmx_store()
1639 if (kvmppc_get_vmx_word(vcpu, index, &val) == -1) in kvmppc_handle_vmx_store()
1643 if (kvmppc_get_vmx_hword(vcpu, index, &val) == -1) in kvmppc_handle_vmx_store()
1647 if (kvmppc_get_vmx_byte(vcpu, index, &val) == -1) in kvmppc_handle_vmx_store()
1654 emulated = kvmppc_handle_store(vcpu, val, bytes, in kvmppc_handle_vmx_store()
1659 vcpu->arch.paddr_accessed += vcpu->run->mmio.len; in kvmppc_handle_vmx_store()
1660 vcpu->arch.mmio_vmx_copy_nums--; in kvmppc_handle_vmx_store()
1661 vcpu->arch.mmio_vmx_offset++; in kvmppc_handle_vmx_store()
1667 static int kvmppc_emulate_mmio_vmx_loadstore(struct kvm_vcpu *vcpu) in kvmppc_emulate_mmio_vmx_loadstore() argument
1669 struct kvm_run *run = vcpu->run; in kvmppc_emulate_mmio_vmx_loadstore()
1673 vcpu->arch.paddr_accessed += run->mmio.len; in kvmppc_emulate_mmio_vmx_loadstore()
1675 if (!vcpu->mmio_is_write) { in kvmppc_emulate_mmio_vmx_loadstore()
1676 emulated = kvmppc_handle_vmx_load(vcpu, in kvmppc_emulate_mmio_vmx_loadstore()
1677 vcpu->arch.io_gpr, run->mmio.len, 1); in kvmppc_emulate_mmio_vmx_loadstore()
1679 emulated = kvmppc_handle_vmx_store(vcpu, in kvmppc_emulate_mmio_vmx_loadstore()
1680 vcpu->arch.io_gpr, run->mmio.len, 1); in kvmppc_emulate_mmio_vmx_loadstore()
1702 int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg) in kvm_vcpu_ioctl_get_one_reg() argument
1712 r = kvmppc_get_one_reg(vcpu, reg->id, &val); in kvm_vcpu_ioctl_get_one_reg()
1722 val.vval = vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0]; in kvm_vcpu_ioctl_get_one_reg()
1729 val = get_reg_val(reg->id, vcpu->arch.vr.vscr.u[3]); in kvm_vcpu_ioctl_get_one_reg()
1732 val = get_reg_val(reg->id, vcpu->arch.vrsave); in kvm_vcpu_ioctl_get_one_reg()
1750 int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg) in kvm_vcpu_ioctl_set_one_reg() argument
1763 r = kvmppc_set_one_reg(vcpu, reg->id, &val); in kvm_vcpu_ioctl_set_one_reg()
1773 vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0] = val.vval; in kvm_vcpu_ioctl_set_one_reg()
1780 vcpu->arch.vr.vscr.u[3] = set_reg_val(reg->id, val); in kvm_vcpu_ioctl_set_one_reg()
1787 vcpu->arch.vrsave = set_reg_val(reg->id, val); in kvm_vcpu_ioctl_set_one_reg()
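
kvm_vcpu_ioctl_get_one_reg()/kvm_vcpu_ioctl_set_one_reg() above back the ONE_REG interface. A hedged userspace sketch; get_one_reg() is a hypothetical helper, and reg_id is whichever KVM_REG_PPC_* id is wanted (the id itself encodes the value size):

#include <linux/kvm.h>
#include <sys/ioctl.h>
#include <stdint.h>

/* buf must be at least as large as the size encoded in reg_id. */
static int get_one_reg(int vcpu_fd, uint64_t reg_id, void *buf)
{
        struct kvm_one_reg reg = {
                .id   = reg_id,
                .addr = (uintptr_t)buf,
        };

        return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);   /* KVM_SET_ONE_REG works the same way */
}
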
1799 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_ioctl_run() argument
1801 struct kvm_run *run = vcpu->run; in kvm_arch_vcpu_ioctl_run()
1804 vcpu_load(vcpu); in kvm_arch_vcpu_ioctl_run()
1806 if (vcpu->mmio_needed) { in kvm_arch_vcpu_ioctl_run()
1807 vcpu->mmio_needed = 0; in kvm_arch_vcpu_ioctl_run()
1808 if (!vcpu->mmio_is_write) in kvm_arch_vcpu_ioctl_run()
1809 kvmppc_complete_mmio_load(vcpu); in kvm_arch_vcpu_ioctl_run()
1811 if (vcpu->arch.mmio_vsx_copy_nums > 0) { in kvm_arch_vcpu_ioctl_run()
1812 vcpu->arch.mmio_vsx_copy_nums--; in kvm_arch_vcpu_ioctl_run()
1813 vcpu->arch.mmio_vsx_offset++; in kvm_arch_vcpu_ioctl_run()
1816 if (vcpu->arch.mmio_vsx_copy_nums > 0) { in kvm_arch_vcpu_ioctl_run()
1817 r = kvmppc_emulate_mmio_vsx_loadstore(vcpu); in kvm_arch_vcpu_ioctl_run()
1819 vcpu->mmio_needed = 1; in kvm_arch_vcpu_ioctl_run()
1825 if (vcpu->arch.mmio_vmx_copy_nums > 0) { in kvm_arch_vcpu_ioctl_run()
1826 vcpu->arch.mmio_vmx_copy_nums--; in kvm_arch_vcpu_ioctl_run()
1827 vcpu->arch.mmio_vmx_offset++; in kvm_arch_vcpu_ioctl_run()
1830 if (vcpu->arch.mmio_vmx_copy_nums > 0) { in kvm_arch_vcpu_ioctl_run()
1831 r = kvmppc_emulate_mmio_vmx_loadstore(vcpu); in kvm_arch_vcpu_ioctl_run()
1833 vcpu->mmio_needed = 1; in kvm_arch_vcpu_ioctl_run()
1838 } else if (vcpu->arch.osi_needed) { in kvm_arch_vcpu_ioctl_run()
1843 kvmppc_set_gpr(vcpu, i, gprs[i]); in kvm_arch_vcpu_ioctl_run()
1844 vcpu->arch.osi_needed = 0; in kvm_arch_vcpu_ioctl_run()
1845 } else if (vcpu->arch.hcall_needed) { in kvm_arch_vcpu_ioctl_run()
1848 kvmppc_set_gpr(vcpu, 3, run->papr_hcall.ret); in kvm_arch_vcpu_ioctl_run()
1850 kvmppc_set_gpr(vcpu, 4 + i, run->papr_hcall.args[i]); in kvm_arch_vcpu_ioctl_run()
1851 vcpu->arch.hcall_needed = 0; in kvm_arch_vcpu_ioctl_run()
1853 } else if (vcpu->arch.epr_needed) { in kvm_arch_vcpu_ioctl_run()
1854 kvmppc_set_epr(vcpu, run->epr.epr); in kvm_arch_vcpu_ioctl_run()
1855 vcpu->arch.epr_needed = 0; in kvm_arch_vcpu_ioctl_run()
1859 kvm_sigset_activate(vcpu); in kvm_arch_vcpu_ioctl_run()
1864 r = kvmppc_vcpu_run(vcpu); in kvm_arch_vcpu_ioctl_run()
1866 kvm_sigset_deactivate(vcpu); in kvm_arch_vcpu_ioctl_run()
1879 vcpu_put(vcpu); in kvm_arch_vcpu_ioctl_run()
1883 int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq) in kvm_vcpu_ioctl_interrupt() argument
1886 kvmppc_core_dequeue_external(vcpu); in kvm_vcpu_ioctl_interrupt()
1890 kvmppc_core_queue_external(vcpu, irq); in kvm_vcpu_ioctl_interrupt()
1892 kvm_vcpu_kick(vcpu); in kvm_vcpu_ioctl_interrupt()
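
kvm_vcpu_ioctl_interrupt() services the KVM_INTERRUPT vCPU ioctl: it dequeues the pending external interrupt for KVM_INTERRUPT_UNSET, otherwise it queues the interrupt and kicks the vCPU. A minimal userspace sketch (set_external_irq() is a hypothetical helper; the SET/UNSET convention shown is the PowerPC one):

#include <linux/kvm.h>
#include <sys/ioctl.h>

static int set_external_irq(int vcpu_fd, int asserted)
{
        struct kvm_interrupt irq = {
                .irq = asserted ? KVM_INTERRUPT_SET : KVM_INTERRUPT_UNSET,
        };

        return ioctl(vcpu_fd, KVM_INTERRUPT, &irq);
}
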
1897 static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu, in kvm_vcpu_ioctl_enable_cap() argument
1908 vcpu->arch.osi_enabled = true; in kvm_vcpu_ioctl_enable_cap()
1912 vcpu->arch.papr_enabled = true; in kvm_vcpu_ioctl_enable_cap()
1917 vcpu->arch.epr_flags |= KVMPPC_EPR_USER; in kvm_vcpu_ioctl_enable_cap()
1919 vcpu->arch.epr_flags &= ~KVMPPC_EPR_USER; in kvm_vcpu_ioctl_enable_cap()
1924 vcpu->arch.watchdog_enabled = true; in kvm_vcpu_ioctl_enable_cap()
1936 r = kvm_vcpu_ioctl_config_tlb(vcpu, &cfg); in kvm_vcpu_ioctl_enable_cap()
1953 r = kvmppc_mpic_connect_vcpu(dev, vcpu, cap->args[1]); in kvm_vcpu_ioctl_enable_cap()
1973 r = kvmppc_xive_connect_vcpu(dev, vcpu, cap->args[1]); in kvm_vcpu_ioctl_enable_cap()
1975 r = kvmppc_xics_connect_vcpu(dev, vcpu, cap->args[1]); in kvm_vcpu_ioctl_enable_cap()
2001 r = kvmppc_xive_native_connect_vcpu(dev, vcpu, in kvm_vcpu_ioctl_enable_cap()
2011 if (!is_kvmppc_hv_enabled(vcpu->kvm)) in kvm_vcpu_ioctl_enable_cap()
2014 vcpu->kvm->arch.fwnmi_enabled = true; in kvm_vcpu_ioctl_enable_cap()
2023 r = kvmppc_sanity_check(vcpu); in kvm_vcpu_ioctl_enable_cap()
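
The per-vCPU flags in this switch are turned on from userspace with KVM_ENABLE_CAP on the vCPU file descriptor (dispatched through kvm_arch_vcpu_ioctl() further down). A sketch enabling PAPR mode; enable_papr() is a hypothetical helper:

#include <linux/kvm.h>
#include <sys/ioctl.h>
#include <string.h>

static int enable_papr(int vcpu_fd)
{
        struct kvm_enable_cap cap;

        memset(&cap, 0, sizeof(cap));
        cap.cap = KVM_CAP_PPC_PAPR;     /* sets vcpu->arch.papr_enabled in the switch above */
        return ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap);
}

The interrupt-controller cases (MPIC, XICS, XIVE) additionally pass arguments through cap->args[], as the connect calls above show with cap->args[1] as the server number.
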
2041 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, in kvm_arch_vcpu_ioctl_get_mpstate() argument
2047 int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, in kvm_arch_vcpu_ioctl_set_mpstate() argument
2056 struct kvm_vcpu *vcpu = filp->private_data; in kvm_arch_vcpu_async_ioctl() local
2063 return kvm_vcpu_ioctl_interrupt(vcpu, &irq); in kvm_arch_vcpu_async_ioctl()
2071 struct kvm_vcpu *vcpu = filp->private_data; in kvm_arch_vcpu_ioctl() local
2082 vcpu_load(vcpu); in kvm_arch_vcpu_ioctl()
2083 r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap); in kvm_arch_vcpu_ioctl()
2084 vcpu_put(vcpu); in kvm_arch_vcpu_ioctl()
2096 r = kvm_vcpu_ioctl_set_one_reg(vcpu, &reg); in kvm_arch_vcpu_ioctl()
2098 r = kvm_vcpu_ioctl_get_one_reg(vcpu, &reg); in kvm_arch_vcpu_ioctl()
2108 vcpu_load(vcpu); in kvm_arch_vcpu_ioctl()
2109 r = kvm_vcpu_ioctl_dirty_tlb(vcpu, &dirty); in kvm_arch_vcpu_ioctl()
2110 vcpu_put(vcpu); in kvm_arch_vcpu_ioctl()
2122 vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf) in kvm_arch_vcpu_fault() argument
2543 void kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu, struct dentry *debugfs_dentry) in kvm_arch_create_vcpu_debugfs() argument
2545 if (vcpu->kvm->arch.kvm_ops->create_vcpu_debugfs) in kvm_arch_create_vcpu_debugfs()
2546 vcpu->kvm->arch.kvm_ops->create_vcpu_debugfs(vcpu, debugfs_dentry); in kvm_arch_create_vcpu_debugfs()