/openbmc/linux/arch/riscv/kvm/
vcpu_exit.c
    70   old_hstatus = csr_swap(CSR_HSTATUS, vcpu->arch.guest_context.hstatus);  in kvm_riscv_vcpu_unpriv_read()
    142  if (vcpu->arch.guest_context.sstatus & SR_SPP)  in kvm_riscv_vcpu_trap_redirect()
    162  vcpu->arch.guest_context.sepc = csr_read(CSR_VSTVEC);  in kvm_riscv_vcpu_trap_redirect()
    165  vcpu->arch.guest_context.sstatus |= SR_SPP;  in kvm_riscv_vcpu_trap_redirect()
    188  if (vcpu->arch.guest_context.hstatus & HSTATUS_SPV) {  in kvm_riscv_vcpu_exit()
    194  if (vcpu->arch.guest_context.hstatus & HSTATUS_SPV)  in kvm_riscv_vcpu_exit()
    200  if (vcpu->arch.guest_context.hstatus & HSTATUS_SPV)  in kvm_riscv_vcpu_exit()
    204  if (vcpu->arch.guest_context.hstatus & HSTATUS_SPV)  in kvm_riscv_vcpu_exit()
    215  vcpu->arch.guest_context.sepc,  in kvm_riscv_vcpu_exit()
    216  vcpu->arch.guest_context.sstatus,  in kvm_riscv_vcpu_exit()
    [all …]
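The trap-redirect hits above (vcpu_exit.c lines 142-165) show guest_context carrying the guest's sepc and sstatus when a fault is forwarded back into the guest. Below is a minimal, self-contained sketch of that flow; the stub types, the stand-in VS-level CSR variables, and the function name are illustrative, and the real kernel function also updates vsstatus, vstval, and the SPIE bit.

typedef unsigned long ulong;

struct kvm_cpu_context { ulong sepc; ulong sstatus; ulong hstatus; };
struct kvm_vcpu_arch   { struct kvm_cpu_context guest_context; };
struct kvm_vcpu        { struct kvm_vcpu_arch arch; };
struct kvm_cpu_trap    { ulong sepc; ulong scause; ulong stval; };

#define SR_SPP (1UL << 8)               /* sstatus.SPP: previous privilege level */

static ulong vstvec, vsepc, vscause;    /* stand-ins for the VS-level CSRs */

/* Sketch only: forward a guest trap to the guest's own S-mode handler. */
static void trap_redirect_sketch(struct kvm_vcpu *vcpu,
                                 struct kvm_cpu_trap *trap)
{
        /* Tell the guest where and why it trapped via its VS-level CSRs. */
        vsepc   = trap->sepc;
        vscause = trap->scause;

        /* Resume the guest at its own S-mode trap vector ... */
        vcpu->arch.guest_context.sepc = vstvec;

        /* ... and record that the trap was taken from supervisor mode. */
        vcpu->arch.guest_context.sstatus |= SR_SPP;
}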
vcpu_insn.c
    157  utrap.sepc = vcpu->arch.guest_context.sepc;  in truly_illegal_insn()
    173  utrap.sepc = vcpu->arch.guest_context.sepc;  in truly_virtual_insn()
    241  SET_RD(insn, &vcpu->arch.guest_context,  in kvm_riscv_vcpu_csr_return()
    245  vcpu->arch.guest_context.sepc += INSN_LEN(insn);  in kvm_riscv_vcpu_csr_return()
    255  ulong rs1_val = GET_RS1(insn, &vcpu->arch.guest_context);  in csr_insn()
    390  vcpu->arch.guest_context.sepc += INSN_LEN(insn);  in system_opcode_insn()
    419  ct = &vcpu->arch.guest_context;  in kvm_riscv_vcpu_virtual_insn()
    461  struct kvm_cpu_context *ct = &vcpu->arch.guest_context;  in kvm_riscv_vcpu_mmio_load()
    587  struct kvm_cpu_context *ct = &vcpu->arch.guest_context;  in kvm_riscv_vcpu_mmio_store()
    613  data = GET_RS2(insn, &vcpu->arch.guest_context);  in kvm_riscv_vcpu_mmio_store()
    [all …]
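Several hits above step guest_context.sepc by INSN_LEN(insn) after emulating an instruction. The sketch below shows the idea behind that pattern, under the assumption that INSN_LEN distinguishes compressed (2-byte) from full (4-byte) encodings by the low two opcode bits; the helper names here are simplified stand-ins, not the kernel's.

#include <stdbool.h>

/* Low two bits != 0b11 means a 16-bit compressed encoding. */
static inline bool insn_is_16bit(unsigned long insn)
{
        return (insn & 0x3) != 0x3;
}

#define INSN_LEN(insn)  (insn_is_16bit(insn) ? 2 : 4)

/* Advance the saved guest PC so the guest resumes after the emulated insn. */
static void skip_emulated_insn(unsigned long *sepc, unsigned long insn)
{
        *sepc += INSN_LEN(insn);
}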
vcpu_sbi_replace.c
    20   struct kvm_cpu_context *cp = &vcpu->arch.guest_context;  in kvm_sbi_ext_time_handler()
    51   struct kvm_cpu_context *cp = &vcpu->arch.guest_context;  in kvm_sbi_ext_ipi_handler()
    95   struct kvm_cpu_context *cp = &vcpu->arch.guest_context;  in kvm_sbi_ext_rfence_handler()
    150  struct kvm_cpu_context *cp = &vcpu->arch.guest_context;  in kvm_sbi_ext_srst_handler()
vcpu_sbi_hsm.c
    18   struct kvm_cpu_context *cp = &vcpu->arch.guest_context;  in kvm_sbi_hsm_vcpu_start()
    72   struct kvm_cpu_context *cp = &vcpu->arch.guest_context;  in kvm_sbi_hsm_vcpu_get_status()
    91   struct kvm_cpu_context *cp = &vcpu->arch.guest_context;  in kvm_sbi_ext_hsm_handler()
vcpu_fp.c
    19   struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;  in kvm_riscv_vcpu_fp_reset()
    81   struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;  in kvm_riscv_vcpu_get_reg_fp()
    126  struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;  in kvm_riscv_vcpu_set_reg_fp()
vcpu.c
    49   struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;  in kvm_riscv_reset_vcpu()
    208  return (vcpu->arch.guest_context.sstatus & SR_SPP) ? true : false;  in kvm_arch_vcpu_in_kernel()
    539  kvm_riscv_vcpu_guest_fp_restore(&vcpu->arch.guest_context,  in kvm_arch_vcpu_load()
    542  kvm_riscv_vcpu_guest_vector_restore(&vcpu->arch.guest_context,  in kvm_arch_vcpu_load()
    558  kvm_riscv_vcpu_guest_fp_save(&vcpu->arch.guest_context,  in kvm_arch_vcpu_put()
    563  kvm_riscv_vcpu_guest_vector_save(&vcpu->arch.guest_context,  in kvm_arch_vcpu_put()
    763  trap.sepc = vcpu->arch.guest_context.sepc;  in kvm_arch_vcpu_ioctl_run()
vcpu_sbi.c
    81   struct kvm_cpu_context *cp = &vcpu->arch.guest_context;  in kvm_riscv_vcpu_sbi_forward()
    121  struct kvm_cpu_context *cp = &vcpu->arch.guest_context;  in kvm_riscv_vcpu_sbi_return()
    133  vcpu->arch.guest_context.sepc += 4;  in kvm_riscv_vcpu_sbi_return()
    365  struct kvm_cpu_context *cp = &vcpu->arch.guest_context;  in kvm_riscv_vcpu_sbi_ecall()
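The vcpu_sbi.c hits show the common exit path of an SBI call: results are written back into the guest registers held in guest_context and sepc is advanced by 4, since ECALL is always a full-width instruction. A hedged sketch of that convention follows; the struct and function names are stand-ins, and the a0/a1/a6/a7 roles follow the standard SBI calling convention rather than anything visible in the listing.

/* Stand-in for the guest register file kept in guest_context. */
struct sbi_regs { unsigned long a0, a1, a2, a3, a4, a5, a6, a7, sepc; };

/* On entry a7/a6 hold the SBI extension and function IDs and a0..a5 the
 * arguments; on return a0 carries the error code and a1 the value. */
static void sbi_return_sketch(struct sbi_regs *cp,
                              unsigned long error, unsigned long value)
{
        cp->a0 = error;         /* SBI error code */
        cp->a1 = value;         /* SBI return value */
        cp->sepc += 4;          /* skip the trapping ECALL (always 32-bit) */
}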
vcpu_vector.c
    22   struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;  in kvm_riscv_vcpu_vector_reset()
    99   struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;  in kvm_riscv_vcpu_vreg_addr()
vcpu_sbi_base.c
    19   struct kvm_cpu_context *cp = &vcpu->arch.guest_context;  in kvm_sbi_ext_base_handler()
vcpu_sbi_pmu.c
    20   struct kvm_cpu_context *cp = &vcpu->arch.guest_context;  in kvm_sbi_ext_pmu_handler()
vcpu_sbi_v01.c
    24   struct kvm_cpu_context *cp = &vcpu->arch.guest_context;  in kvm_sbi_ext_v01_handler()
vcpu_onereg.c
    275  struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;  in kvm_riscv_vcpu_get_reg_core()
    308  struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;  in kvm_riscv_vcpu_set_reg_core()
    769  const struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;  in num_fp_f_regs()
    798  const struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;  in num_fp_d_regs()
aia_imsic.c
    828  vcpu->arch.guest_context.hstatus &= ~HSTATUS_VGEIN;  in kvm_riscv_vcpu_aia_imsic_update()
    830  vcpu->arch.guest_context.hstatus |=  in kvm_riscv_vcpu_aia_imsic_update()
/openbmc/linux/arch/riscv/kernel/
asm-offsets.c
    123  OFFSET(KVM_ARCH_GUEST_ZERO, kvm_vcpu_arch, guest_context.zero);  in asm_offsets()
    124  OFFSET(KVM_ARCH_GUEST_RA, kvm_vcpu_arch, guest_context.ra);  in asm_offsets()
    125  OFFSET(KVM_ARCH_GUEST_SP, kvm_vcpu_arch, guest_context.sp);  in asm_offsets()
    126  OFFSET(KVM_ARCH_GUEST_GP, kvm_vcpu_arch, guest_context.gp);  in asm_offsets()
    127  OFFSET(KVM_ARCH_GUEST_TP, kvm_vcpu_arch, guest_context.tp);  in asm_offsets()
    128  OFFSET(KVM_ARCH_GUEST_T0, kvm_vcpu_arch, guest_context.t0);  in asm_offsets()
    129  OFFSET(KVM_ARCH_GUEST_T1, kvm_vcpu_arch, guest_context.t1);  in asm_offsets()
    130  OFFSET(KVM_ARCH_GUEST_T2, kvm_vcpu_arch, guest_context.t2);  in asm_offsets()
    131  OFFSET(KVM_ARCH_GUEST_S0, kvm_vcpu_arch, guest_context.s0);  in asm_offsets()
    132  OFFSET(KVM_ARCH_GUEST_S1, kvm_vcpu_arch, guest_context.s1);  in asm_offsets()
    [all …]
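asm-offsets.c exports the byte offset of each guest_context field as an assembler-visible constant so the low-level world-switch code can save and restore guest registers by fixed offsets. The sketch below illustrates the underlying offsetof trick with a simplified DEFINE/OFFSET pair and trimmed stand-in structs; it is not the kernel's exact macro definition.

#include <stddef.h>

/* Trimmed stand-in layouts; only the fields needed for the example. */
struct kvm_cpu_context { unsigned long zero, ra, sp, gp, tp; };
struct kvm_vcpu_arch   { struct kvm_cpu_context guest_context; };

/* Emit "->SYMBOL value" markers into the assembly output; a build step
 * later turns these into #defines usable from .S files. */
#define DEFINE(sym, val) \
        asm volatile("\n.ascii \"->" #sym " %0\"" : : "i" (val))
#define OFFSET(sym, str, mem)  DEFINE(sym, offsetof(struct str, mem))

void asm_offsets_sketch(void)
{
        OFFSET(KVM_ARCH_GUEST_RA, kvm_vcpu_arch, guest_context.ra);
        OFFSET(KVM_ARCH_GUEST_SP, kvm_vcpu_arch, guest_context.sp);
}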
/openbmc/linux/arch/riscv/include/asm/
kvm_host.h
    191  struct kvm_cpu_context guest_context;  member
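kvm_host.h is where guest_context is declared: the per-VCPU register file that every handler above reaches through vcpu->arch.guest_context. The trimmed sketch below lists only fields visible in these hits; the real structure also holds the remaining GPRs plus FP and vector state.

/* Trimmed stand-in; only fields referenced in the hits above are spelled out. */
struct kvm_cpu_context_sketch {
        unsigned long zero, ra, sp, gp, tp;     /* x0-x4 */
        unsigned long t0, t1, t2;               /* x5-x7 */
        unsigned long s0, s1;                   /* x8-x9 */
        /* ... a0-a7, s2-s11, t3-t6 ... */
        unsigned long sepc;                     /* guest program counter */
        unsigned long sstatus;                  /* guest supervisor status */
        unsigned long hstatus;                  /* hypervisor status bits for the guest */
};

struct kvm_vcpu_arch_sketch {
        /* ... ISA configuration, CSR shadow state, timer, AIA state ... */
        struct kvm_cpu_context_sketch guest_context;
};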