
Searched refs:kvm_cpu_context (Results 1 – 25 of 36) sorted by relevance


/openbmc/linux/arch/riscv/include/asm/
kvm_vcpu_fp.h
15 struct kvm_cpu_context;
18 void __kvm_riscv_fp_f_save(struct kvm_cpu_context *context);
19 void __kvm_riscv_fp_f_restore(struct kvm_cpu_context *context);
20 void __kvm_riscv_fp_d_save(struct kvm_cpu_context *context);
21 void __kvm_riscv_fp_d_restore(struct kvm_cpu_context *context);
24 void kvm_riscv_vcpu_guest_fp_save(struct kvm_cpu_context *cntx,
26 void kvm_riscv_vcpu_guest_fp_restore(struct kvm_cpu_context *cntx,
28 void kvm_riscv_vcpu_host_fp_save(struct kvm_cpu_context *cntx);
29 void kvm_riscv_vcpu_host_fp_restore(struct kvm_cpu_context *cntx);
34 static inline void kvm_riscv_vcpu_guest_fp_save(struct kvm_cpu_context *cntx, in kvm_riscv_vcpu_guest_fp_save()
[all …]
kvm_vcpu_vector.h
19 static __always_inline void __kvm_riscv_vector_save(struct kvm_cpu_context *context) in __kvm_riscv_vector_save()
24 static __always_inline void __kvm_riscv_vector_restore(struct kvm_cpu_context *context) in __kvm_riscv_vector_restore()
30 void kvm_riscv_vcpu_guest_vector_save(struct kvm_cpu_context *cntx,
32 void kvm_riscv_vcpu_guest_vector_restore(struct kvm_cpu_context *cntx,
34 void kvm_riscv_vcpu_host_vector_save(struct kvm_cpu_context *cntx);
35 void kvm_riscv_vcpu_host_vector_restore(struct kvm_cpu_context *cntx);
37 struct kvm_cpu_context *cntx);
41 struct kvm_cpu_context;
47 static inline void kvm_riscv_vcpu_guest_vector_save(struct kvm_cpu_context *cntx, in kvm_riscv_vcpu_guest_vector_save()
52 static inline void kvm_riscv_vcpu_guest_vector_restore(struct kvm_cpu_context *cntx, in kvm_riscv_vcpu_guest_vector_restore()
[all …]
kvm_host.h
114 struct kvm_cpu_context {
188 struct kvm_cpu_context host_context;
191 struct kvm_cpu_context guest_context;
197 struct kvm_cpu_context guest_reset_context;
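
Aside: the three kvm_cpu_context fields above (host, guest, and guest-reset) suggest a simple pattern, where resetting a vCPU copies the pristine reset context over the live guest context (compare kvm_riscv_vcpu_fp_reset further down). Below is a minimal, self-contained C sketch of that pattern; the struct layout and field names are illustrative stand-ins, not the real arch-specific kvm_cpu_context.

/*
 * Sketch of the reset-context pattern implied by the
 * host_context/guest_context/guest_reset_context trio above.
 * All fields are illustrative.
 */
#include <stdio.h>
#include <string.h>

struct cpu_context {
	unsigned long gprs[32];   /* general-purpose registers (illustrative) */
	unsigned long sepc;       /* exception PC (illustrative) */
};

struct vcpu_arch {
	struct cpu_context host_context;
	struct cpu_context guest_context;
	struct cpu_context guest_reset_context;
};

/* Reset the guest by copying the pristine reset context over it. */
static void vcpu_reset(struct vcpu_arch *arch)
{
	memcpy(&arch->guest_context, &arch->guest_reset_context,
	       sizeof(arch->guest_context));
}

int main(void)
{
	struct vcpu_arch arch = { 0 };

	arch.guest_reset_context.sepc = 0x80000000UL;
	vcpu_reset(&arch);
	printf("guest entry pc: %#lx\n", arch.guest_context.sepc);
	return 0;
}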
/openbmc/linux/arch/arm64/kvm/hyp/nvhe/
hyp-main.c
24 void __kvm_hyp_host_forward_smc(struct kvm_cpu_context *host_ctxt);
74 static void handle___kvm_vcpu_run(struct kvm_cpu_context *host_ctxt) in handle___kvm_vcpu_run()
108 static void handle___kvm_adjust_pc(struct kvm_cpu_context *host_ctxt) in handle___kvm_adjust_pc()
115 static void handle___kvm_flush_vm_context(struct kvm_cpu_context *host_ctxt) in handle___kvm_flush_vm_context()
120 static void handle___kvm_tlb_flush_vmid_ipa(struct kvm_cpu_context *host_ctxt) in handle___kvm_tlb_flush_vmid_ipa()
129 static void handle___kvm_tlb_flush_vmid_ipa_nsh(struct kvm_cpu_context *host_ctxt) in handle___kvm_tlb_flush_vmid_ipa_nsh()
139 handle___kvm_tlb_flush_vmid_range(struct kvm_cpu_context *host_ctxt) in handle___kvm_tlb_flush_vmid_range()
148 static void handle___kvm_tlb_flush_vmid(struct kvm_cpu_context *host_ctxt) in handle___kvm_tlb_flush_vmid()
155 static void handle___kvm_flush_cpu_context(struct kvm_cpu_context *host_ctxt) in handle___kvm_flush_cpu_context()
162 static void handle___kvm_timer_set_cntvoff(struct kvm_cpu_context *host_ctxt) in handle___kvm_timer_set_cntvoff()
[all …]
psci-relay.c
20 void __noreturn __host_enter(struct kvm_cpu_context *host_ctxt);
72 static unsigned long psci_forward(struct kvm_cpu_context *host_ctxt) in psci_forward()
107 static int psci_cpu_on(u64 func_id, struct kvm_cpu_context *host_ctxt) in psci_cpu_on()
151 static int psci_cpu_suspend(u64 func_id, struct kvm_cpu_context *host_ctxt) in psci_cpu_suspend()
179 static int psci_system_suspend(u64 func_id, struct kvm_cpu_context *host_ctxt) in psci_system_suspend()
206 struct kvm_cpu_context *host_ctxt; in __kvm_host_psci_cpu_entry()
224 static unsigned long psci_0_1_handler(u64 func_id, struct kvm_cpu_context *host_ctxt) in psci_0_1_handler()
236 static unsigned long psci_0_2_handler(u64 func_id, struct kvm_cpu_context *host_ctxt) in psci_0_2_handler()
262 static unsigned long psci_1_0_handler(u64 func_id, struct kvm_cpu_context *host_ctxt) in psci_1_0_handler()
276 bool kvm_host_psci_handler(struct kvm_cpu_context *host_ctxt, u32 func_id) in kvm_host_psci_handler()
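
Aside: the psci_0_1/0_2/1_0 handlers and kvm_host_psci_handler above imply a two-level dispatch, first by the firmware's PSCI version and then by SMCCC function ID. The sketch below shows that shape under stated assumptions: the context struct, version variable, and handler bodies are hypothetical stand-ins, and the function-ID constant is only believed to match PSCI_VERSION.

/*
 * Two-level dispatch sketch: version first, then function ID.
 * Everything here is illustrative, not the hyp code itself.
 */
#include <stdint.h>
#include <stdio.h>

struct cpu_context { uint64_t regs[4]; };   /* stand-in for kvm_cpu_context */

enum psci_version { PSCI_0_1, PSCI_0_2, PSCI_1_0 };
static enum psci_version host_psci_version = PSCI_1_0;

static unsigned long psci_forward(struct cpu_context *ctxt)
{
	(void)ctxt;
	return 0;   /* real code re-issues the SMC to firmware */
}

static unsigned long psci_1_0_handler(uint64_t func_id, struct cpu_context *ctxt)
{
	switch (func_id) {
	case 0x84000000UL:          /* believed to be PSCI_VERSION */
		return 0x10000;     /* report version 1.0 */
	default:
		return psci_forward(ctxt);
	}
}

static unsigned long host_psci_handler(uint64_t func_id, struct cpu_context *ctxt)
{
	switch (host_psci_version) {
	case PSCI_1_0:
		return psci_1_0_handler(func_id, ctxt);
	default:
		return psci_forward(ctxt);
	}
}

int main(void)
{
	struct cpu_context ctxt = { { 0 } };

	printf("PSCI_VERSION -> %#lx\n", host_psci_handler(0x84000000UL, &ctxt));
	return 0;
}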
switch.c
34 DEFINE_PER_CPU(struct kvm_cpu_context, kvm_hyp_ctxt);
70 struct kvm_cpu_context *ctxt = &vcpu->arch.ctxt; in __activate_traps()
248 struct kvm_cpu_context *host_ctxt; in __kvm_vcpu_run()
249 struct kvm_cpu_context *guest_ctxt; in __kvm_vcpu_run()
365 struct kvm_cpu_context *host_ctxt; in hyp_panic()
sysreg-sr.c
21 void __sysreg_save_state_nvhe(struct kvm_cpu_context *ctxt) in __sysreg_save_state_nvhe()
29 void __sysreg_restore_state_nvhe(struct kvm_cpu_context *ctxt) in __sysreg_restore_state_nvhe()
ffa.c
94 static void ffa_set_retval(struct kvm_cpu_context *ctxt, in ffa_set_retval()
181 struct kvm_cpu_context *ctxt) in do_ffa_rxtx_map()
260 struct kvm_cpu_context *ctxt) in do_ffa_rxtx_unmap()
361 struct kvm_cpu_context *ctxt) in do_ffa_mem_frag_tx()
420 struct kvm_cpu_context *ctxt) in __do_ffa_mem_xfer()
511 struct kvm_cpu_context *ctxt) in do_ffa_mem_reclaim()
618 struct kvm_cpu_context *ctxt) in do_ffa_features()
646 bool kvm_host_ffa_handler(struct kvm_cpu_context *host_ctxt, u32 func_id) in kvm_host_ffa_handler()
/openbmc/linux/arch/riscv/kernel/
asm-offsets.c
207 OFFSET(KVM_ARCH_FP_F_F0, kvm_cpu_context, fp.f.f[0]); in asm_offsets()
208 OFFSET(KVM_ARCH_FP_F_F1, kvm_cpu_context, fp.f.f[1]); in asm_offsets()
209 OFFSET(KVM_ARCH_FP_F_F2, kvm_cpu_context, fp.f.f[2]); in asm_offsets()
210 OFFSET(KVM_ARCH_FP_F_F3, kvm_cpu_context, fp.f.f[3]); in asm_offsets()
211 OFFSET(KVM_ARCH_FP_F_F4, kvm_cpu_context, fp.f.f[4]); in asm_offsets()
212 OFFSET(KVM_ARCH_FP_F_F5, kvm_cpu_context, fp.f.f[5]); in asm_offsets()
213 OFFSET(KVM_ARCH_FP_F_F6, kvm_cpu_context, fp.f.f[6]); in asm_offsets()
214 OFFSET(KVM_ARCH_FP_F_F7, kvm_cpu_context, fp.f.f[7]); in asm_offsets()
215 OFFSET(KVM_ARCH_FP_F_F8, kvm_cpu_context, fp.f.f[8]); in asm_offsets()
216 OFFSET(KVM_ARCH_FP_F_F9, kvm_cpu_context, fp.f.f[9]); in asm_offsets()
[all …]
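
Aside: the OFFSET() entries above come from the kernel's asm-offsets technique, which turns struct member offsets into named constants that hand-written assembly (such as __kvm_riscv_fp_f_save) can use to address C structs; the arm64 DEFINE() hits further down use the same mechanism. A standalone sketch follows, with an illustrative stand-in struct, that simply prints the constants instead of generating a header as the kernel build does.

/*
 * asm-offsets sketch: emit struct member offsets as named constants.
 * The struct here is a stand-in, not the real kvm_cpu_context.
 */
#include <stddef.h>
#include <stdio.h>

struct fp_f_state {
	unsigned int f[32];
	unsigned int fcsr;
};

struct cpu_context {
	unsigned long gprs[32];
	struct fp_f_state fp_f;
};

#define OFFSET(sym, str, mem) \
	printf("#define " #sym " %zu\n", offsetof(struct str, mem))

int main(void)
{
	OFFSET(KVM_ARCH_FP_F_F0, cpu_context, fp_f.f[0]);
	OFFSET(KVM_ARCH_FP_F_F1, cpu_context, fp_f.f[1]);
	OFFSET(KVM_ARCH_FP_F_FCSR, cpu_context, fp_f.fcsr);
	return 0;
}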
/openbmc/linux/arch/arm64/kvm/hyp/vhe/
sysreg-sr.c
28 void sysreg_save_host_state_vhe(struct kvm_cpu_context *ctxt) in sysreg_save_host_state_vhe()
34 void sysreg_save_guest_state_vhe(struct kvm_cpu_context *ctxt) in sysreg_save_guest_state_vhe()
41 void sysreg_restore_host_state_vhe(struct kvm_cpu_context *ctxt) in sysreg_restore_host_state_vhe()
47 void sysreg_restore_guest_state_vhe(struct kvm_cpu_context *ctxt) in sysreg_restore_guest_state_vhe()
67 struct kvm_cpu_context *guest_ctxt = &vcpu->arch.ctxt; in kvm_vcpu_load_sysregs_vhe()
68 struct kvm_cpu_context *host_ctxt; in kvm_vcpu_load_sysregs_vhe()
112 struct kvm_cpu_context *guest_ctxt = &vcpu->arch.ctxt; in kvm_vcpu_put_sysregs_vhe()
113 struct kvm_cpu_context *host_ctxt; in kvm_vcpu_put_sysregs_vhe()
switch.c
33 DEFINE_PER_CPU(struct kvm_cpu_context, kvm_hyp_ctxt);
206 struct kvm_cpu_context *host_ctxt; in __kvm_vcpu_run_vhe()
207 struct kvm_cpu_context *guest_ctxt; in __kvm_vcpu_run_vhe()
298 struct kvm_cpu_context *host_ctxt; in __hyp_call_panic()
/openbmc/linux/arch/arm64/include/asm/
kvm_hyp.h
15 DECLARE_PER_CPU(struct kvm_cpu_context, kvm_hyp_ctxt);
93 void __sysreg_save_state_nvhe(struct kvm_cpu_context *ctxt);
94 void __sysreg_restore_state_nvhe(struct kvm_cpu_context *ctxt);
96 void sysreg_save_host_state_vhe(struct kvm_cpu_context *ctxt);
97 void sysreg_restore_host_state_vhe(struct kvm_cpu_context *ctxt);
98 void sysreg_save_guest_state_vhe(struct kvm_cpu_context *ctxt);
99 void sysreg_restore_guest_state_vhe(struct kvm_cpu_context *ctxt);
121 bool kvm_host_psci_handler(struct kvm_cpu_context *host_ctxt, u32 func_id);
124 void __noreturn __hyp_do_panic(struct kvm_cpu_context *host_ctxt, u64 spsr,
133 void __noreturn __host_enter(struct kvm_cpu_context *host_ctxt);
kvm_emulate.h
193 static inline bool vcpu_is_el2_ctxt(const struct kvm_cpu_context *ctxt) in vcpu_is_el2_ctxt()
209 static inline bool __vcpu_el2_e2h_is_set(const struct kvm_cpu_context *ctxt) in __vcpu_el2_e2h_is_set()
219 static inline bool __vcpu_el2_tge_is_set(const struct kvm_cpu_context *ctxt) in __vcpu_el2_tge_is_set()
229 static inline bool __is_hyp_ctxt(const struct kvm_cpu_context *ctxt) in __is_hyp_ctxt()
/openbmc/linux/arch/riscv/kvm/
vcpu_fp.c
19 struct kvm_cpu_context *cntx = &vcpu->arch.guest_context; in kvm_riscv_vcpu_fp_reset()
29 static void kvm_riscv_vcpu_fp_clean(struct kvm_cpu_context *cntx) in kvm_riscv_vcpu_fp_clean()
35 void kvm_riscv_vcpu_guest_fp_save(struct kvm_cpu_context *cntx, in kvm_riscv_vcpu_guest_fp_save()
47 void kvm_riscv_vcpu_guest_fp_restore(struct kvm_cpu_context *cntx, in kvm_riscv_vcpu_guest_fp_restore()
59 void kvm_riscv_vcpu_host_fp_save(struct kvm_cpu_context *cntx) in kvm_riscv_vcpu_host_fp_save()
68 void kvm_riscv_vcpu_host_fp_restore(struct kvm_cpu_context *cntx) in kvm_riscv_vcpu_host_fp_restore()
81 struct kvm_cpu_context *cntx = &vcpu->arch.guest_context; in kvm_riscv_vcpu_get_reg_fp()
126 struct kvm_cpu_context *cntx = &vcpu->arch.guest_context; in kvm_riscv_vcpu_set_reg_fp()
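
Aside: kvm_riscv_vcpu_fp_clean together with the guest save hook above points at a lazy save scheme, where guest FP state is written out only when the status register marks the FP unit dirty (vcpu_vector.c below mirrors the same shape for vector state). A minimal sketch under that assumption; the status-bit encoding and register layout are illustrative, not the real sstatus.FS handling.

/*
 * Lazy FP save sketch: save only when the unit is dirty, then mark
 * it clean. Bit positions and fields are illustrative.
 */
#include <stdio.h>

#define FS_MASK  0x3UL
#define FS_CLEAN 0x2UL
#define FS_DIRTY 0x3UL

struct cpu_context {
	unsigned long sstatus;   /* stand-in status register image */
	double f[32];            /* stand-in FP register file */
};

static void fp_clean(struct cpu_context *cntx)
{
	cntx->sstatus = (cntx->sstatus & ~FS_MASK) | FS_CLEAN;
}

static void guest_fp_save(struct cpu_context *cntx)
{
	if ((cntx->sstatus & FS_MASK) == FS_DIRTY) {
		/* real code calls __kvm_riscv_fp_{f,d}_save() here */
		puts("saving guest FP registers");
		fp_clean(cntx);
	}
}

int main(void)
{
	struct cpu_context cntx = { .sstatus = FS_DIRTY };

	guest_fp_save(&cntx);   /* saves: unit is dirty */
	guest_fp_save(&cntx);   /* no-op: already clean */
	return 0;
}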
vcpu_vector.c
22 struct kvm_cpu_context *cntx = &vcpu->arch.guest_context; in kvm_riscv_vcpu_vector_reset()
34 static void kvm_riscv_vcpu_vector_clean(struct kvm_cpu_context *cntx) in kvm_riscv_vcpu_vector_clean()
40 void kvm_riscv_vcpu_guest_vector_save(struct kvm_cpu_context *cntx, in kvm_riscv_vcpu_guest_vector_save()
50 void kvm_riscv_vcpu_guest_vector_restore(struct kvm_cpu_context *cntx, in kvm_riscv_vcpu_guest_vector_restore()
60 void kvm_riscv_vcpu_host_vector_save(struct kvm_cpu_context *cntx) in kvm_riscv_vcpu_host_vector_save()
67 void kvm_riscv_vcpu_host_vector_restore(struct kvm_cpu_context *cntx) in kvm_riscv_vcpu_host_vector_restore()
74 struct kvm_cpu_context *cntx) in kvm_riscv_vcpu_alloc_vector_context()
99 struct kvm_cpu_context *cntx = &vcpu->arch.guest_context; in kvm_riscv_vcpu_vreg_addr()
vcpu_sbi_hsm.c
17 struct kvm_cpu_context *reset_cntx; in kvm_sbi_hsm_vcpu_start()
18 struct kvm_cpu_context *cp = &vcpu->arch.guest_context; in kvm_sbi_hsm_vcpu_start()
72 struct kvm_cpu_context *cp = &vcpu->arch.guest_context; in kvm_sbi_hsm_vcpu_get_status()
91 struct kvm_cpu_context *cp = &vcpu->arch.guest_context; in kvm_sbi_ext_hsm_handler()
vcpu_sbi_replace.c
20 struct kvm_cpu_context *cp = &vcpu->arch.guest_context; in kvm_sbi_ext_time_handler()
51 struct kvm_cpu_context *cp = &vcpu->arch.guest_context; in kvm_sbi_ext_ipi_handler()
95 struct kvm_cpu_context *cp = &vcpu->arch.guest_context; in kvm_sbi_ext_rfence_handler()
150 struct kvm_cpu_context *cp = &vcpu->arch.guest_context; in kvm_sbi_ext_srst_handler()
vcpu_sbi.c
81 struct kvm_cpu_context *cp = &vcpu->arch.guest_context; in kvm_riscv_vcpu_sbi_forward()
121 struct kvm_cpu_context *cp = &vcpu->arch.guest_context; in kvm_riscv_vcpu_sbi_return()
365 struct kvm_cpu_context *cp = &vcpu->arch.guest_context; in kvm_riscv_vcpu_sbi_ecall()
vcpu_sbi_base.c
19 struct kvm_cpu_context *cp = &vcpu->arch.guest_context; in kvm_sbi_ext_base_handler()
vcpu_sbi_pmu.c
20 struct kvm_cpu_context *cp = &vcpu->arch.guest_context; in kvm_sbi_ext_pmu_handler()
/openbmc/linux/arch/arm64/kvm/hyp/include/hyp/
sysreg-sr.h
19 static inline void __sysreg_save_common_state(struct kvm_cpu_context *ctxt) in __sysreg_save_common_state()
24 static inline void __sysreg_save_user_state(struct kvm_cpu_context *ctxt) in __sysreg_save_user_state()
30 static inline bool ctxt_has_mte(struct kvm_cpu_context *ctxt) in ctxt_has_mte()
40 static inline void __sysreg_save_el1_state(struct kvm_cpu_context *ctxt) in __sysreg_save_el1_state()
75 static inline void __sysreg_save_el2_return_state(struct kvm_cpu_context *ctxt) in __sysreg_save_el2_return_state()
89 static inline void __sysreg_restore_common_state(struct kvm_cpu_context *ctxt) in __sysreg_restore_common_state()
94 static inline void __sysreg_restore_user_state(struct kvm_cpu_context *ctxt) in __sysreg_restore_user_state()
100 static inline void __sysreg_restore_el1_state(struct kvm_cpu_context *ctxt) in __sysreg_restore_el1_state()
170 static inline u64 to_hw_pstate(const struct kvm_cpu_context *ctxt) in to_hw_pstate()
186 static inline void __sysreg_restore_el2_return_state(struct kvm_cpu_context *ctxt) in __sysreg_restore_el2_return_state()
debug-sr.h
92 struct kvm_cpu_context *ctxt) in __debug_save_state()
110 struct kvm_cpu_context *ctxt) in __debug_restore_state()
130 struct kvm_cpu_context *host_ctxt; in __debug_switch_to_guest_common()
131 struct kvm_cpu_context *guest_ctxt; in __debug_switch_to_guest_common()
149 struct kvm_cpu_context *host_ctxt; in __debug_switch_to_host_common()
150 struct kvm_cpu_context *guest_ctxt; in __debug_switch_to_host_common()
switch.h
84 struct kvm_cpu_context *hctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt; in __activate_traps_hfgxtr()
159 struct kvm_cpu_context *hctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt; in __deactivate_traps_hfgxtr()
187 struct kvm_cpu_context *hctxt; in __activate_traps_common()
223 struct kvm_cpu_context *hctxt; in __deactivate_traps_common()
426 DECLARE_PER_CPU(struct kvm_cpu_context, kvm_hyp_ctxt);
430 struct kvm_cpu_context *ctxt; in kvm_hyp_handle_ptrauth()
/openbmc/linux/arch/arm64/kernel/
asm-offsets.c
132 DEFINE(CPU_USER_PT_REGS, offsetof(struct kvm_cpu_context, regs)); in main()
133 DEFINE(CPU_RGSR_EL1, offsetof(struct kvm_cpu_context, sys_regs[RGSR_EL1])); in main()
134 DEFINE(CPU_GCR_EL1, offsetof(struct kvm_cpu_context, sys_regs[GCR_EL1])); in main()
135 DEFINE(CPU_APIAKEYLO_EL1, offsetof(struct kvm_cpu_context, sys_regs[APIAKEYLO_EL1])); in main()
136 DEFINE(CPU_APIBKEYLO_EL1, offsetof(struct kvm_cpu_context, sys_regs[APIBKEYLO_EL1])); in main()
137 DEFINE(CPU_APDAKEYLO_EL1, offsetof(struct kvm_cpu_context, sys_regs[APDAKEYLO_EL1])); in main()
138 DEFINE(CPU_APDBKEYLO_EL1, offsetof(struct kvm_cpu_context, sys_regs[APDBKEYLO_EL1])); in main()
139 DEFINE(CPU_APGAKEYLO_EL1, offsetof(struct kvm_cpu_context, sys_regs[APGAKEYLO_EL1])); in main()
140 DEFINE(HOST_CONTEXT_VCPU, offsetof(struct kvm_cpu_context, __hyp_running_vcpu)); in main()
/openbmc/linux/arch/arm64/kvm/hyp/include/nvhe/
ffa.h
15 bool kvm_host_ffa_handler(struct kvm_cpu_context *host_ctxt, u32 func_id);
