/openbmc/linux/arch/riscv/kvm/

vcpu.c:

    STATS_DESC_COUNTER(VCPU, ecall_exit_stat),
    STATS_DESC_COUNTER(VCPU, wfi_exit_stat),
    STATS_DESC_COUNTER(VCPU, mmio_exit_user),
    STATS_DESC_COUNTER(VCPU, mmio_exit_kernel),
    STATS_DESC_COUNTER(VCPU, csr_exit_user),
    STATS_DESC_COUNTER(VCPU, csr_exit_kernel),
    STATS_DESC_COUNTER(VCPU, signal_exits),
    STATS_DESC_COUNTER(VCPU, exits)

    static void kvm_riscv_reset_vcpu(struct kvm_vcpu *vcpu)
    {
        struct kvm_vcpu_csr *csr = &vcpu…

Matched definitions:
    kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
    kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
    kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
    kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
    kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu)
    kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
    kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
    kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
    kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
    kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
    kvm_arch_vcpu_async_ioctl()   local: struct kvm_vcpu *vcpu = filp->private_data;
    kvm_arch_vcpu_ioctl()         local: struct kvm_vcpu *vcpu = filp->private_data;
    kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
    kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
    kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
    kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
    kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, struct kvm_translation *tr)
    kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
    kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
    kvm_riscv_vcpu_flush_interrupts(struct kvm_vcpu *vcpu)
    kvm_riscv_vcpu_sync_interrupts(struct kvm_vcpu *vcpu)
    kvm_riscv_vcpu_set_interrupt(struct kvm_vcpu *vcpu, unsigned int irq)
    kvm_riscv_vcpu_unset_interrupt(struct kvm_vcpu *vcpu, unsigned int irq)
    kvm_riscv_vcpu_has_interrupts(struct kvm_vcpu *vcpu, u64 mask)
    kvm_riscv_vcpu_power_off(struct kvm_vcpu *vcpu)
    kvm_riscv_vcpu_power_on(struct kvm_vcpu *vcpu)
    kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, struct kvm_mp_state *mp_state)
    kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, struct kvm_mp_state *mp_state)
    kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, struct kvm_guest_debug *dbg)
    kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
    kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
    kvm_riscv_check_vcpu_requests(struct kvm_vcpu *vcpu)
    kvm_riscv_update_hvip(struct kvm_vcpu *vcpu)
    kvm_riscv_vcpu_enter_exit(struct kvm_vcpu *vcpu)
    kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
    …
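
These riscv counters are one instance of KVM's generic stats machinery: each STATS_DESC_COUNTER(VCPU, name) descriptor pairs with a same-named field in the architecture's per-vCPU stat structure, and each exit path bumps its field. A self-contained toy model of that pairing, with stand-in types rather than kernel code:

/*
 * Toy model (not kernel code): each vCPU carries a block of named
 * counters and every exit path bumps exactly one of them, mirroring
 * how the STATS_DESC_COUNTER(VCPU, ...) descriptors above pair with
 * fields of the architecture's struct kvm_vcpu_stat.
 */
#include <stdint.h>
#include <stdio.h>

struct toy_vcpu_stat {
    uint64_t wfi_exit_stat;
    uint64_t exits;
};

struct toy_vcpu {
    struct toy_vcpu_stat stat;
};

/* Hypothetical exit handler: account the exit, then handle it. */
static void toy_handle_wfi_exit(struct toy_vcpu *vcpu)
{
    vcpu->stat.wfi_exit_stat++;
    vcpu->stat.exits++;
    /* ... emulate WFI: block until an interrupt is pending ... */
}

int main(void)
{
    struct toy_vcpu vcpu = { { 0, 0 } };

    toy_handle_wfi_exit(&vcpu);
    printf("wfi exits: %llu, total exits: %llu\n",
           (unsigned long long)vcpu.stat.wfi_exit_stat,
           (unsigned long long)vcpu.stat.exits);
    return 0;
}

Userspace reads these counters through KVM's stats file descriptor; the descriptor table above is what gives each field its externally visible name.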

/openbmc/linux/arch/powerpc/kvm/

booke.c:

    STATS_DESC_COUNTER(VCPU, sum_exits),
    STATS_DESC_COUNTER(VCPU, mmio_exits),
    STATS_DESC_COUNTER(VCPU, signal_exits),
    STATS_DESC_COUNTER(VCPU, light_exits),
    STATS_DESC_COUNTER(VCPU, itlb_real_miss_exits),
    STATS_DESC_COUNTER(VCPU, itlb_virt_miss_exits),
    STATS_DESC_COUNTER(VCPU, dtlb_real_miss_exits),
    STATS_DESC_COUNTER(VCPU, dtlb_virt_miss_exits),
    STATS_DESC_COUNTER(VCPU, syscall_exits),
    STATS_DESC_COUNTER(VCPU, isi_exits),
    …

book3s_emulate.c:

static bool spr_allowed(struct kvm_vcpu *vcpu, enum priv_level level)
...
    if (vcpu->arch.papr_enabled && (level > PRIV_SUPER))
...
    if ((kvmppc_get_msr(vcpu) & MSR_PR) && level > PRIV_PROBLEM)
...

static inline void kvmppc_copyto_vcpu_tm(struct kvm_vcpu *vcpu)
...
    memcpy(&vcpu->arch.gpr_tm[0], &vcpu->arch.regs.gpr[0],
           sizeof(vcpu->arch.gpr_tm));
    memcpy(&vcpu->arch.fp_tm, &vcpu->arch.fp,
...
    memcpy(&vcpu->arch.vr_tm, &vcpu->arch.vr,
...
    vcpu->arch.ppr_tm = vcpu->arch.ppr;
    vcpu->arch.dscr_tm = vcpu->arch.dscr;
…

booke_emulate.c:

static void kvmppc_emul_rfi(struct kvm_vcpu *vcpu)
{
    vcpu->arch.regs.nip = vcpu->arch.shared->srr0;
    kvmppc_set_msr(vcpu, vcpu->arch.shared->srr1);
}

static void kvmppc_emul_rfdi(struct kvm_vcpu *vcpu)
{
    vcpu->arch.regs.nip = vcpu->arch.dsrr0;
    kvmppc_set_msr(vcpu, vcpu->arch.dsrr1);
}

static void kvmppc_emul_rfci(struct kvm_vcpu *vcpu)
{
    vcpu->arch.regs.nip = vcpu->arch.csrr0;
    kvmppc_set_msr(vcpu, vcpu->arch.csrr1);
}

int kvmppc_booke_emulate_op(struct kvm_vcpu *vcpu,
…
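
All three emulations above share one shape: return-from-interrupt restores the program counter from a save/restore register pair (SRR, DSRR, or CSRR) and rebuilds the MSR from its partner. A self-contained toy of that pattern, with stand-in types (nothing here is kernel or PowerPC API):

#include <stdint.h>
#include <stdio.h>

/* Stand-in vCPU state: program counter, machine state, and the
 * save/restore pair that the interrupt entry path filled in. */
struct toy_vcpu {
    uint64_t nip;   /* next instruction pointer */
    uint64_t msr;   /* machine state register */
    uint64_t srr0;  /* PC saved at interrupt entry */
    uint64_t srr1;  /* MSR saved at interrupt entry */
};

/* Emulate return-from-interrupt: the guest resumes at srr0 with the
 * privilege/interrupt-enable state that was captured in srr1. */
static void toy_emul_rfi(struct toy_vcpu *vcpu)
{
    vcpu->nip = vcpu->srr0;
    vcpu->msr = vcpu->srr1;
}

int main(void)
{
    struct toy_vcpu vcpu = { .nip = 0x900, .msr = 0x0,
                             .srr0 = 0x1234, .srr1 = 0x8000 };

    toy_emul_rfi(&vcpu);
    printf("resume at %#llx with msr %#llx\n",
           (unsigned long long)vcpu.nip,
           (unsigned long long)vcpu.msr);
    return 0;
}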

book3s.c:

    STATS_DESC_COUNTER(VCPU, sum_exits),
    STATS_DESC_COUNTER(VCPU, mmio_exits),
    STATS_DESC_COUNTER(VCPU, signal_exits),
    STATS_DESC_COUNTER(VCPU, light_exits),
    STATS_DESC_COUNTER(VCPU, itlb_real_miss_exits),
    STATS_DESC_COUNTER(VCPU, itlb_virt_miss_exits),
    STATS_DESC_COUNTER(VCPU, dtlb_real_miss_exits),
    STATS_DESC_COUNTER(VCPU, dtlb_virt_miss_exits),
    STATS_DESC_COUNTER(VCPU, syscall_exits),
    STATS_DESC_COUNTER(VCPU, isi_exits),
    …

book3s_pr.c:

static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
...
static int kvmppc_handle_fac(struct kvm_vcpu *vcpu, ulong fac);
...

static bool kvmppc_is_split_real(struct kvm_vcpu *vcpu)
{
    ulong msr = kvmppc_get_msr(vcpu);
...

static void kvmppc_fixup_split_real(struct kvm_vcpu *vcpu)
{
    ulong msr = kvmppc_get_msr(vcpu);
    ulong pc = kvmppc_get_pc(vcpu);
...
    if (vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK)
...
    vcpu->arch.hflags |= BOOK3S_HFLAG_SPLIT_HACK;
    kvmppc_set_pc(vcpu, pc | SPLIT_HACK_OFFS);
…

emulate_loadstore.c:

static bool kvmppc_check_fp_disabled(struct kvm_vcpu *vcpu)
{
    if (!(kvmppc_get_msr(vcpu) & MSR_FP)) {
        kvmppc_core_queue_fpunavail(vcpu, kvmppc_get_msr(vcpu) & SRR1_PREFIXED);
...

static bool kvmppc_check_vsx_disabled(struct kvm_vcpu *vcpu)
{
    if (!(kvmppc_get_msr(vcpu) & MSR_VSX)) {
        kvmppc_core_queue_vsx_unavail(vcpu, kvmppc_get_msr(vcpu) & SRR1_PREFIXED);
...

static bool kvmppc_check_altivec_disabled(struct kvm_vcpu *vcpu)
{
    if (!(kvmppc_get_msr(vcpu) & MSR_VEC)) {
        kvmppc_core_queue_vec_unavail(vcpu, kvmppc_get_msr(vcpu) & SRR1_PREFIXED);
...

int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
…
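
Each check above follows the same template: if the guest's MSR has the facility disabled, queue the matching "facility unavailable" interrupt and report failure so the emulator does not touch state the guest cannot legally see. A stand-alone toy of that template (bit values and the vector number are invented for illustration):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Toy MSR facility bits (values made up for illustration). */
#define TOY_MSR_FP  (1u << 13)
#define TOY_MSR_VEC (1u << 25)

struct toy_vcpu {
    uint32_t msr;
    int pending_irq;   /* which "unavailable" interrupt is queued */
};

/* If the facility is disabled, queue the corresponding interrupt
 * and tell the caller to abandon the emulation, exactly the shape
 * of the kvmppc_check_*_disabled() helpers above. */
static bool toy_check_fp_disabled(struct toy_vcpu *vcpu)
{
    if (!(vcpu->msr & TOY_MSR_FP)) {
        vcpu->pending_irq = 0x800;  /* toy "FP unavailable" vector */
        return true;                /* caller must not emulate */
    }
    return false;
}

int main(void)
{
    struct toy_vcpu vcpu = { .msr = TOY_MSR_VEC };  /* FP is off */

    if (toy_check_fp_disabled(&vcpu))
        printf("queued interrupt %#x instead of emulating\n",
               vcpu.pending_irq);
    return 0;
}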

book3s_paired_singles.c:

static inline void kvmppc_sync_qpr(struct kvm_vcpu *vcpu, int rt)
{
    kvm_cvt_df(&VCPU_FPR(vcpu, rt), &vcpu->arch.qpr[rt]);
}

static void kvmppc_inject_pf(struct kvm_vcpu *vcpu, ulong eaddr, bool is_store)
...
    u64 msr = kvmppc_get_msr(vcpu);
...
    kvmppc_set_msr(vcpu, msr);
    kvmppc_set_dar(vcpu, eaddr);
...
    kvmppc_set_dsisr(vcpu, dsisr);
    kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_DATA_STORAGE);
}

static int kvmppc_emulate_fpr_load(struct kvm_vcpu *vcpu,
...
    r = kvmppc_ld(vcpu, &addr, len, tmp, true);
…

powerpc.c:

bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu)
{
    return kvm_arch_vcpu_runnable(vcpu);
}

bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
...

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
...

int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
...
    kvmppc_account_exit(vcpu, SIGNAL_EXITS);
    vcpu->run->exit_reason = KVM_EXIT_INTR;
...
    vcpu->mode = IN_GUEST_MODE;

    /*
     * Reading vcpu->requests must happen after setting vcpu->mode,
     ...
     * to the page tables done while the VCPU is running.
     */
…
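
The truncated comment in kvmppc_prepare_to_enter() points at a classic KVM handshake: the entry path publishes vcpu->mode = IN_GUEST_MODE before reading pending requests, so a remote CPU that posts a request and then checks the mode can never have its request missed. A self-contained sketch of that ordering in C11 atomics; the names are stand-ins, and seq_cst ordering stands in for the kernel's explicit barriers:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

enum { OUTSIDE_GUEST_MODE, IN_GUEST_MODE };

struct toy_vcpu {
    _Atomic int mode;
    _Atomic unsigned requests;
};

/* Entry side: publish the mode first, then look at requests. */
static bool toy_try_enter_guest(struct toy_vcpu *vcpu)
{
    atomic_store(&vcpu->mode, IN_GUEST_MODE);
    if (atomic_load(&vcpu->requests)) {
        /* A request sneaked in: back out and service it. */
        atomic_store(&vcpu->mode, OUTSIDE_GUEST_MODE);
        return false;
    }
    return true;  /* safe to run the guest */
}

/* Kicker side: post the request, then decide whether the target
 * vCPU is in guest mode and therefore needs an IPI to notice. */
static bool toy_post_request(struct toy_vcpu *vcpu, unsigned req)
{
    atomic_fetch_or(&vcpu->requests, req);
    return atomic_load(&vcpu->mode) == IN_GUEST_MODE;
}

int main(void)
{
    struct toy_vcpu vcpu = { OUTSIDE_GUEST_MODE, 0 };

    if (toy_post_request(&vcpu, 1u << 0))
        puts("would send IPI");
    printf("enter ok: %d\n", toy_try_enter_guest(&vcpu));
    return 0;
}

Whichever interleaving the two sides race into, either the entry path sees the request or the kicker sees IN_GUEST_MODE and sends the kick; the bug the ordering rule prevents is the window where neither happens.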

book3s_pr_papr.c:

static unsigned long get_pteg_addr(struct kvm_vcpu *vcpu, long pte_index)
{
    struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
...

static int kvmppc_h_pr_enter(struct kvm_vcpu *vcpu)
{
    long flags = kvmppc_get_gpr(vcpu, 4);
    long pte_index = kvmppc_get_gpr(vcpu, 5);
...
    pteg_addr = get_pteg_addr(vcpu, pte_index);

    mutex_lock(&vcpu->kvm->arch.hpt_mutex);
...
    hpte[0] = cpu_to_be64(kvmppc_get_gpr(vcpu, 6));
    hpte[1] = cpu_to_be64(kvmppc_get_gpr(vcpu, 7));
...
    kvmppc_set_gpr(vcpu, 4, pte_index | i);
…

/openbmc/linux/arch/s390/kvm/

priv.c:

static int handle_ri(struct kvm_vcpu *vcpu)
{
    vcpu->stat.instruction_ri++;

    if (test_kvm_facility(vcpu->kvm, 64)) {
        VCPU_EVENT(vcpu, 3, "%s", "ENABLE: RI (lazy)");
        vcpu->arch.sie_block->ecb3 |= ECB3_RI;
        kvm_s390_retry_instr(vcpu);
...
    return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);
}

int kvm_s390_handle_aa(struct kvm_vcpu *vcpu)
{
    if ((vcpu->arch.sie_block->ipa & 0xf) <= 4)
        return handle_ri(vcpu);
…

intercept.c:

u8 kvm_s390_get_ilen(struct kvm_vcpu *vcpu)
{
    struct kvm_s390_sie_block *sie_block = vcpu->arch.sie_block;

    switch (vcpu->arch.sie_block->icptcode) {
...
        ilen = insn_length(vcpu->arch.sie_block->ipa >> 8);
...
        ilen = vcpu->arch.sie_block->pgmilc & 0x6;
...

static int handle_stop(struct kvm_vcpu *vcpu)
{
    struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
...
    vcpu->stat.exit_stop_request++;
...
    if (kvm_s390_vcpu_has_irq(vcpu, 1))
...
    stop_pending = kvm_s390_is_stop_irq_pending(vcpu);
…
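
kvm_s390_get_ilen() needs the length of the intercepted instruction, and on s390 the two most significant bits of the first opcode byte encode it: 00 means 2 bytes, 01 and 10 mean 4, and 11 means 6. A stand-alone toy version of the insn_length() helper used above (illustrative, not the kernel's exact source):

#include <stdint.h>
#include <stdio.h>

/* s390 instruction length from the first opcode byte: the top two
 * bits select the 2-, 4-, or 6-byte instruction format class. */
static uint8_t toy_insn_length(uint8_t opcode)
{
    switch (opcode >> 6) {
    case 0:
        return 2;
    case 1:
    case 2:
        return 4;
    default:
        return 6;
    }
}

int main(void)
{
    /* One opcode byte from each length class. */
    printf("%u %u %u\n", toy_insn_length(0x04),
           toy_insn_length(0xb2), toy_insn_length(0xe3));
    return 0;
}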

diag.c:

static int diag_release_pages(struct kvm_vcpu *vcpu)
{
    unsigned long prefix = kvm_s390_get_prefix(vcpu);

    start = vcpu->run->s.regs.gprs[(vcpu->arch.sie_block->ipa & 0xf0) >> 4];
    end = vcpu->run->s.regs.gprs[vcpu->arch.sie_block->ipa & 0xf] + PAGE_SIZE;
    vcpu->stat.instruction_diagnose_10++;
...
        return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

    VCPU_EVENT(vcpu, 5, "diag release pages %lX %lX", start, end);
...
    gmap_discard(vcpu->arch.gmap, start, end);
...
        gmap_discard(vcpu->arch.gmap, start, prefix);
...
        gmap_discard(vcpu->arch.gmap, 0, PAGE_SIZE);
…
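
diag 0x10 lets a guest release a page range, but the discard has to be split so the prefix (lowcore) area stays resident, which is why diag_release_pages() ends up calling gmap_discard() with several different sub-ranges. A self-contained toy of that range-splitting decision, simplified to a single 4 KiB prefix page instead of the real layout:

#include <stdio.h>

#define TOY_PAGE_SIZE 0x1000UL

/* Pretend to discard guest pages in [start, end). */
static void toy_discard(unsigned long start, unsigned long end)
{
    printf("discard [%#lx, %#lx)\n", start, end);
}

/* Discard [start, end) while keeping the prefix page resident,
 * splitting the range when it covers the prefix. */
static void toy_release_pages(unsigned long start, unsigned long end,
                              unsigned long prefix)
{
    if (end <= prefix || start >= prefix + TOY_PAGE_SIZE) {
        toy_discard(start, end);            /* no overlap */
        return;
    }
    if (start < prefix)
        toy_discard(start, prefix);         /* part below the prefix */
    if (prefix + TOY_PAGE_SIZE < end)
        toy_discard(prefix + TOY_PAGE_SIZE, end);  /* part above */
}

int main(void)
{
    toy_release_pages(0x0, 0x10000, 0x2000);
    return 0;
}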

kvm-s390.h:

#define IS_TE_ENABLED(vcpu)	((vcpu->arch.sie_block->ecb & ECB_TE))

#define IS_ITDB_VALID(vcpu) \
	((*(char *)phys_to_virt((vcpu)->arch.sie_block->itdba) == TDB_FORMAT1))
...

static inline void kvm_s390_set_cpuflags(struct kvm_vcpu *vcpu, u32 flags)
{
    atomic_or(flags, &vcpu->arch.sie_block->cpuflags);
}

static inline void kvm_s390_clear_cpuflags(struct kvm_vcpu *vcpu, u32 flags)
{
    atomic_andnot(flags, &vcpu->arch.sie_block->cpuflags);
}

static inline bool kvm_s390_test_cpuflags(struct kvm_vcpu *vcpu, u32 flags)
{
    return (atomic_read(&vcpu->arch.sie_block->cpuflags) & flags) == flags;
}

static inline int is_vcpu_stopped(struct kvm_vcpu *vcpu)
…
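
The cpuflags helpers above are thin wrappers over atomic bit operations on the SIE control block, and the test helper deliberately requires that all requested flags are set, not just one. The same pattern in portable C11 atomics (flag values invented; the real CPUSTAT_* bits live in the s390 headers):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Toy flag values (illustrative, not the architected bits). */
#define TOY_CPUSTAT_STOPPED (1u << 0)
#define TOY_CPUSTAT_ECALL   (1u << 1)

static _Atomic unsigned cpuflags;

static void toy_set_cpuflags(unsigned flags)
{
    atomic_fetch_or(&cpuflags, flags);      /* like atomic_or() */
}

static void toy_clear_cpuflags(unsigned flags)
{
    atomic_fetch_and(&cpuflags, ~flags);    /* like atomic_andnot() */
}

/* True only if *all* requested flags are set, matching the
 * "== flags" comparison in kvm_s390_test_cpuflags(). */
static bool toy_test_cpuflags(unsigned flags)
{
    return (atomic_load(&cpuflags) & flags) == flags;
}

int main(void)
{
    toy_set_cpuflags(TOY_CPUSTAT_STOPPED | TOY_CPUSTAT_ECALL);
    toy_clear_cpuflags(TOY_CPUSTAT_ECALL);
    printf("stopped: %d, ecall: %d\n",
           toy_test_cpuflags(TOY_CPUSTAT_STOPPED),
           toy_test_cpuflags(TOY_CPUSTAT_ECALL));
    return 0;
}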

guestdbg.c:

static void enable_all_hw_bp(struct kvm_vcpu *vcpu)
...
    u64 *cr9 = &vcpu->arch.sie_block->gcr[9];
    u64 *cr10 = &vcpu->arch.sie_block->gcr[10];
    u64 *cr11 = &vcpu->arch.sie_block->gcr[11];
...
    if (vcpu->arch.guestdbg.nr_hw_bp <= 0 ||
        vcpu->arch.guestdbg.hw_bp_info == NULL)
...
    for (i = 0; i < vcpu->arch.guestdbg.nr_hw_bp; i++) {
        start = vcpu->arch.guestdbg.hw_bp_info[i].addr;
        len = vcpu->arch.guestdbg.hw_bp_info[i].len;
...

static void enable_all_hw_wp(struct kvm_vcpu *vcpu)
…

sigp.c:

static int __sigp_sense(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu,
...
    VCPU_EVENT(vcpu, 4, "sensed status of cpu %x rc %x", dst_vcpu->vcpu_id,
...

static int __inject_sigp_emergency(struct kvm_vcpu *vcpu,
...
        .u.emerg.code = vcpu->vcpu_id,
...
    VCPU_EVENT(vcpu, 4, "sent sigp emerg to cpu %x",
...

static int __sigp_emergency(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu)
{
    return __inject_sigp_emergency(vcpu, dst_vcpu);
}

static int __sigp_conditional_emergency(struct kvm_vcpu *vcpu,
...
    idle = is_vcpu_idle(vcpu);
...
    if (!is_vcpu_stopped(vcpu)
…

/openbmc/linux/arch/arm64/include/asm/

kvm_emulate.h:

bool kvm_condition_valid32(const struct kvm_vcpu *vcpu);
void kvm_skip_instr32(struct kvm_vcpu *vcpu);

void kvm_inject_undefined(struct kvm_vcpu *vcpu);
void kvm_inject_vabt(struct kvm_vcpu *vcpu);
void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);
void kvm_inject_size_fault(struct kvm_vcpu *vcpu);

void kvm_vcpu_wfi(struct kvm_vcpu *vcpu);

void kvm_emulate_nested_eret(struct kvm_vcpu *vcpu);
int kvm_inject_nested_sync(struct kvm_vcpu *vcpu, u64 esr_el2);
…

/openbmc/linux/arch/x86/kvm/

x86.c:

    ((struct kvm_vcpu *)(ctxt)->vcpu)
...

static void update_cr8_intercept(struct kvm_vcpu *vcpu);
static void process_nmi(struct kvm_vcpu *vcpu);
static void __kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
static void store_regs(struct kvm_vcpu *vcpu);
static int sync_regs(struct kvm_vcpu *vcpu);
static int kvm_vcpu_do_singlestep(struct kvm_vcpu *vcpu);

static int __set_sregs2(struct kvm_vcpu *vcpu, struct kvm_sregs2 *sregs2);
static void __get_sregs2(struct kvm_vcpu *vcpu, struct kvm_sregs2 *sregs2);
...

    STATS_DESC_COUNTER(VCPU, pf_taken),
…

kvm_cache_regs.h:

static __always_inline unsigned long kvm_##lname##_read(struct kvm_vcpu *vcpu)\
...
	return vcpu->arch.regs[VCPU_REGS_##uname]; \
...
static __always_inline void kvm_##lname##_write(struct kvm_vcpu *vcpu, \
...
	vcpu->arch.regs[VCPU_REGS_##uname] = val; \
...

/*
 ...
 * 1   0    register in vcpu->arch
 * 1   1    register in vcpu->arch, needs to be stored back
 */
static inline bool kvm_register_is_available(struct kvm_vcpu *vcpu,
...
    return test_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
...

static inline bool kvm_register_is_dirty(struct kvm_vcpu *vcpu,
...
    return test_bit(reg, (unsigned long *)&vcpu->arch.regs_dirty);
…
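
The ## token pasting above lives inside a macro that stamps out a read/write accessor pair per x86 GPR, invoked once per register elsewhere in the header (the excerpt's BUILD_KVM_GPR_ACCESSORS). A stand-alone demonstration of the same preprocessor trick with toy names:

#include <stdio.h>

enum toy_reg { TOY_REGS_RAX, TOY_REGS_RBX, TOY_NR_REGS };

struct toy_vcpu {
    unsigned long regs[TOY_NR_REGS];
};

/* One macro invocation generates both accessors for one register,
 * mirroring the kvm_##lname##_read/_write pattern above. */
#define BUILD_TOY_GPR_ACCESSORS(lname, uname) \
static inline unsigned long toy_##lname##_read(struct toy_vcpu *vcpu) \
{ \
    return vcpu->regs[TOY_REGS_##uname]; \
} \
static inline void toy_##lname##_write(struct toy_vcpu *vcpu, \
                                       unsigned long val) \
{ \
    vcpu->regs[TOY_REGS_##uname] = val; \
}

BUILD_TOY_GPR_ACCESSORS(rax, RAX)
BUILD_TOY_GPR_ACCESSORS(rbx, RBX)

int main(void)
{
    struct toy_vcpu vcpu = { { 0 } };

    toy_rax_write(&vcpu, 42);
    printf("rax = %lu\n", toy_rax_read(&vcpu));
    return 0;
}

The comment table fragment above ("avail/dirty" bits) belongs to the companion caching scheme: regs_avail says the cached copy in vcpu->arch is valid, and regs_dirty says it must be written back to the hardware structure before the next guest entry.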

smm.c:

void kvm_smm_changed(struct kvm_vcpu *vcpu, bool entering_smm)
{
    trace_kvm_smm_transition(vcpu->vcpu_id, vcpu->arch.smbase, entering_smm);
...
        vcpu->arch.hflags |= HF_SMM_MASK;
...
        vcpu->arch.hflags &= ~(HF_SMM_MASK | HF_SMM_INSIDE_NMI_MASK);
...
        kvm_make_request(KVM_REQ_EVENT, vcpu);
...
        vcpu->arch.pdptrs_from_userspace = false;
...
    kvm_mmu_reset_context(vcpu);
}

void process_smi(struct kvm_vcpu *vcpu)
{
    vcpu->arch.smi_pending = true;
    kvm_make_request(KVM_REQ_EVENT, vcpu);
…
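
kvm_smm_changed() toggles hidden-flag state: entering SMM sets HF_SMM_MASK, while leaving clears it together with the "NMI taken inside SMM" latch, since that latch is meaningless outside SMM. A minimal toy of that flag discipline (bit values invented):

#include <stdbool.h>
#include <stdio.h>

/* Toy hidden-flag bits (illustrative values). */
#define TOY_HF_SMM            (1u << 1)
#define TOY_HF_SMM_INSIDE_NMI (1u << 2)

struct toy_vcpu {
    unsigned hflags;
};

/* Entering SMM sets the SMM flag; leaving clears it together with
 * the inside-NMI latch, as in kvm_smm_changed() above. */
static void toy_smm_changed(struct toy_vcpu *vcpu, bool entering_smm)
{
    if (entering_smm)
        vcpu->hflags |= TOY_HF_SMM;
    else
        vcpu->hflags &= ~(TOY_HF_SMM | TOY_HF_SMM_INSIDE_NMI);
}

int main(void)
{
    struct toy_vcpu vcpu = { .hflags = TOY_HF_SMM | TOY_HF_SMM_INSIDE_NMI };

    toy_smm_changed(&vcpu, false);
    printf("hflags after leaving SMM: %#x\n", vcpu.hflags);
    return 0;
}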

x86.h:

void kvm_service_local_tlb_flush_requests(struct kvm_vcpu *vcpu);
int kvm_check_nested_events(struct kvm_vcpu *vcpu);

static inline bool kvm_vcpu_has_run(struct kvm_vcpu *vcpu)
{
    return vcpu->arch.last_vmentry_cpu != -1;
}

static inline bool kvm_is_exception_pending(struct kvm_vcpu *vcpu)
{
    return vcpu->arch.exception.pending ||
           vcpu->arch.exception_vmexit.pending ||
           kvm_test_request(KVM_REQ_TRIPLE_FAULT, vcpu);
}

static inline void kvm_clear_exception_queue(struct kvm_vcpu *vcpu)
{
    vcpu->arch.exception.pending = false;
…

/openbmc/linux/arch/arm64/kvm/

inject_fault.c:

static void pend_sync_exception(struct kvm_vcpu *vcpu)
...
    if (likely(!vcpu_has_nv(vcpu))) {
        kvm_pend_exception(vcpu, EXCEPT_AA64_EL1_SYNC);
...
    switch(*vcpu_cpsr(vcpu) & PSR_MODE_MASK) {
...
        kvm_pend_exception(vcpu, EXCEPT_AA64_EL2_SYNC);
...
        kvm_pend_exception(vcpu, EXCEPT_AA64_EL1_SYNC);
...
        if (vcpu_el2_tge_is_set(vcpu))
            kvm_pend_exception(vcpu, EXCEPT_AA64_EL2_SYNC);
...
            kvm_pend_exception(vcpu, EXCEPT_AA64_EL1_SYNC);
...

static bool match_target_el(struct kvm_vcpu *vcpu, unsigned long target)
…

debug.c:

/*
 ...
 * for the vcpu after the single-step is disabled.
 */
static void save_guest_debug_regs(struct kvm_vcpu *vcpu)
{
    u64 val = vcpu_read_sys_reg(vcpu, MDSCR_EL1);

    vcpu->arch.guest_debug_preserved.mdscr_el1 = val;
...
        vcpu->arch.guest_debug_preserved.mdscr_el1);

    vcpu->arch.guest_debug_preserved.pstate_ss =
        (*vcpu_cpsr(vcpu) & DBG_SPSR_SS);
}

static void restore_guest_debug_regs(struct kvm_vcpu *vcpu)
{
    u64 val = vcpu->arch.guest_debug_preserved.mdscr_el1;

    vcpu_write_sys_reg(vcpu, val, MDSCR_EL1);
…
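
save_guest_debug_regs() and restore_guest_debug_regs() bracket host-requested single-stepping: the guest's own MDSCR_EL1 and PSTATE.SS are stashed before the host borrows them and put back afterwards. A minimal toy of that stash-and-restore bracket, with a single stand-in register instead of the arm64 state:

#include <stdint.h>
#include <stdio.h>

struct toy_vcpu {
    uint64_t mdscr;            /* guest's debug control register */
    uint64_t preserved_mdscr;  /* stash while the host debugs */
};

static void toy_save_guest_debug(struct toy_vcpu *vcpu)
{
    vcpu->preserved_mdscr = vcpu->mdscr;
}

static void toy_restore_guest_debug(struct toy_vcpu *vcpu)
{
    vcpu->mdscr = vcpu->preserved_mdscr;
}

int main(void)
{
    struct toy_vcpu vcpu = { .mdscr = 0x1 };

    toy_save_guest_debug(&vcpu);
    vcpu.mdscr |= 0x8000;      /* host enables its own debug bits */
    toy_restore_guest_debug(&vcpu);
    printf("guest mdscr restored to %#llx\n",
           (unsigned long long)vcpu.mdscr);
    return 0;
}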

/openbmc/linux/include/kvm/

arm_pmu.h:

u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx);
void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val);
u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu);
u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1);
void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu);
void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu);
void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu);
void kvm_pmu_disable_counter_mask(struct kvm_vcpu *vcpu, u64 val);
void kvm_pmu_enable_counter_mask(struct kvm_vcpu *vcpu, u64 val);
void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu);
…

/openbmc/linux/arch/mips/kvm/

emulate.c:

static int kvm_compute_return_epc(struct kvm_vcpu *vcpu, unsigned long instpc,
...
    struct kvm_vcpu_arch *arch = &vcpu->arch;
...
    err = kvm_get_badinstrp((u32 *)epc, vcpu, &insn.word);
...

enum emulation_result update_pc(struct kvm_vcpu *vcpu, u32 cause)
...
        err = kvm_compute_return_epc(vcpu, vcpu->arch.pc,
                                     &vcpu->arch.pc);
...
        vcpu->arch.pc += 4;
...
    kvm_debug("update_pc(): New PC: %#lx\n", vcpu->arch.pc);
...

/*
 ...
 * @vcpu:	KVM VCPU information.
 ...
 */
int kvm_get_badinstr(u32 *opc, struct kvm_vcpu *vcpu, u32 *out)
…
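
update_pc() encodes a MIPS quirk: when the faulting instruction sits in a branch delay slot (the BD bit in Cause), the resume PC is the recomputed branch target rather than simply PC + 4, which is what kvm_compute_return_epc() exists for. A self-contained toy of that decision; the two-case decode below is a deliberate simplification of real MIPS branch decoding:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Toy "cause" flag: the trapping instruction was in a delay slot. */
#define TOY_CAUSE_BD (1u << 31)

struct toy_vcpu {
    unsigned long pc;
    long branch_offset;  /* byte offset decoded from the branch (toy) */
    bool branch_taken;
};

/* If the exception hit a delay slot, resume at the recomputed
 * branch target (or the fall-through past the slot); otherwise
 * just step over the instruction, as update_pc() does above. */
static void toy_update_pc(struct toy_vcpu *vcpu, uint32_t cause)
{
    if (cause & TOY_CAUSE_BD) {
        if (vcpu->branch_taken)
            vcpu->pc += 4 + vcpu->branch_offset;  /* relative to slot */
        else
            vcpu->pc += 8;  /* skip branch plus its delay slot */
    } else {
        vcpu->pc += 4;
    }
}

int main(void)
{
    struct toy_vcpu vcpu = { .pc = 0x1000, .branch_offset = 0x40,
                             .branch_taken = true };

    toy_update_pc(&vcpu, TOY_CAUSE_BD);
    printf("resume at %#lx\n", vcpu.pc);
    return 0;
}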