xref: /openbmc/linux/arch/arm64/include/asm/kvm_hyp.h (revision 8a511e7efc5a72173f64d191f01cda236d54e27a)
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#ifndef __ARM64_KVM_HYP_H__
#define __ARM64_KVM_HYP_H__

#include <linux/compiler.h>
#include <linux/kvm_host.h>
#include <asm/alternative.h>
#include <asm/sysreg.h>

DECLARE_PER_CPU(struct kvm_cpu_context, kvm_hyp_ctxt);
DECLARE_PER_CPU(unsigned long, kvm_hyp_vector);
DECLARE_PER_CPU(struct kvm_nvhe_init_params, kvm_init_params);

/*
 * Unified accessors for registers that have a different encoding
 * between VHE and non-VHE. They must be specified without their "ELx"
 * encoding, but with the SYS_ prefix, as defined in asm/sysreg.h.
 */

#if defined(__KVM_VHE_HYPERVISOR__)

#define read_sysreg_el0(r)	read_sysreg_s(r##_EL02)
#define write_sysreg_el0(v,r)	write_sysreg_s(v, r##_EL02)
#define read_sysreg_el1(r)	read_sysreg_s(r##_EL12)
#define write_sysreg_el1(v,r)	write_sysreg_s(v, r##_EL12)
#define read_sysreg_el2(r)	read_sysreg_s(r##_EL1)
#define write_sysreg_el2(v,r)	write_sysreg_s(v, r##_EL1)

#else // !__KVM_VHE_HYPERVISOR__

#if defined(__KVM_NVHE_HYPERVISOR__)
#define VHE_ALT_KEY	ARM64_KVM_HVHE
#else
#define VHE_ALT_KEY	ARM64_HAS_VIRT_HOST_EXTN
#endif

#define read_sysreg_elx(r,nvh,vh)					\
	({								\
		u64 reg;						\
		asm volatile(ALTERNATIVE(__mrs_s("%0", r##nvh),		\
					 __mrs_s("%0", r##vh),		\
					 VHE_ALT_KEY)			\
			     : "=r" (reg));				\
		reg;							\
	})

#define write_sysreg_elx(v,r,nvh,vh)					\
	do {								\
		u64 __val = (u64)(v);					\
		asm volatile(ALTERNATIVE(__msr_s(r##nvh, "%x0"),	\
					 __msr_s(r##vh, "%x0"),		\
					 VHE_ALT_KEY)			\
					 : : "rZ" (__val));		\
	} while (0)

#define read_sysreg_el0(r)	read_sysreg_elx(r, _EL0, _EL02)
#define write_sysreg_el0(v,r)	write_sysreg_elx(v, r, _EL0, _EL02)
#define read_sysreg_el1(r)	read_sysreg_elx(r, _EL1, _EL12)
#define write_sysreg_el1(v,r)	write_sysreg_elx(v, r, _EL1, _EL12)
#define read_sysreg_el2(r)	read_sysreg_elx(r, _EL2, _EL1)
#define write_sysreg_el2(v,r)	write_sysreg_elx(v, r, _EL2, _EL1)

#endif	// __KVM_VHE_HYPERVISOR__
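
/*
 * Example usage (a sketch along the lines of the hyp sysreg save/restore
 * code, not a definition provided by this header):
 *
 *	ctxt_sys_reg(ctxt, SCTLR_EL1) = read_sysreg_el1(SYS_SCTLR);
 *	write_sysreg_el1(ctxt_sys_reg(ctxt, SCTLR_EL1), SYS_SCTLR);
 *
 * On VHE this accesses SCTLR_EL12 (the guest's EL1 register as seen from
 * EL2); on nVHE the alternative patches it down to a plain SCTLR_EL1
 * access.
 */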

/*
 * Without an __arch_swab32(), we fall back to ___constant_swab32(), but the
 * static inline can allow the compiler to out-of-line this. KVM always wants
 * the macro version, as it's always inlined.
 */
#define __kvm_swab32(x)	___constant_swab32(x)
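
/*
 * Sketch of a typical use (modelled loosely on the vgic-v2 cpuif proxy;
 * "vcpu_is_be" here stands in for the real big-endian vcpu check):
 *
 *	u32 data = readl_relaxed(addr);
 *	if (vcpu_is_be)
 *		data = __kvm_swab32(data);
 */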

int __vgic_v2_perform_cpuif_access(struct kvm_vcpu *vcpu);

void __vgic_v3_save_state(struct vgic_v3_cpu_if *cpu_if);
void __vgic_v3_restore_state(struct vgic_v3_cpu_if *cpu_if);
void __vgic_v3_activate_traps(struct vgic_v3_cpu_if *cpu_if);
void __vgic_v3_deactivate_traps(struct vgic_v3_cpu_if *cpu_if);
void __vgic_v3_save_aprs(struct vgic_v3_cpu_if *cpu_if);
void __vgic_v3_restore_aprs(struct vgic_v3_cpu_if *cpu_if);
int __vgic_v3_perform_cpuif_access(struct kvm_vcpu *vcpu);

#ifdef __KVM_NVHE_HYPERVISOR__
void __timer_enable_traps(struct kvm_vcpu *vcpu);
void __timer_disable_traps(struct kvm_vcpu *vcpu);
#endif

#ifdef __KVM_NVHE_HYPERVISOR__
void __sysreg_save_state_nvhe(struct kvm_cpu_context *ctxt);
void __sysreg_restore_state_nvhe(struct kvm_cpu_context *ctxt);
#else
void sysreg_save_host_state_vhe(struct kvm_cpu_context *ctxt);
void sysreg_restore_host_state_vhe(struct kvm_cpu_context *ctxt);
void sysreg_save_guest_state_vhe(struct kvm_cpu_context *ctxt);
void sysreg_restore_guest_state_vhe(struct kvm_cpu_context *ctxt);
#endif

void __debug_switch_to_guest(struct kvm_vcpu *vcpu);
void __debug_switch_to_host(struct kvm_vcpu *vcpu);

#ifdef __KVM_NVHE_HYPERVISOR__
void __debug_save_host_buffers_nvhe(struct kvm_vcpu *vcpu);
void __debug_restore_host_buffers_nvhe(struct kvm_vcpu *vcpu);
#endif

void __fpsimd_save_state(struct user_fpsimd_state *fp_regs);
void __fpsimd_restore_state(struct user_fpsimd_state *fp_regs);
void __sve_restore_state(void *sve_pffr, u32 *fpsr);

#ifndef __KVM_NVHE_HYPERVISOR__
void activate_traps_vhe_load(struct kvm_vcpu *vcpu);
void deactivate_traps_vhe_put(struct kvm_vcpu *vcpu);
#endif

u64 __guest_enter(struct kvm_vcpu *vcpu);

bool kvm_host_psci_handler(struct kvm_cpu_context *host_ctxt, u32 func_id);

#ifdef __KVM_NVHE_HYPERVISOR__
void __noreturn __hyp_do_panic(struct kvm_cpu_context *host_ctxt, u64 spsr,
			       u64 elr, u64 par);
#endif

#ifdef __KVM_NVHE_HYPERVISOR__
void __pkvm_init_switch_pgd(phys_addr_t phys, unsigned long size,
			    phys_addr_t pgd, void *sp, void *cont_fn);
int __pkvm_init(phys_addr_t phys, unsigned long size, unsigned long nr_cpus,
		unsigned long *per_cpu_base, u32 hyp_va_bits);
void __noreturn __host_enter(struct kvm_cpu_context *host_ctxt);
#endif

extern u64 kvm_nvhe_sym(id_aa64pfr0_el1_sys_val);
extern u64 kvm_nvhe_sym(id_aa64pfr1_el1_sys_val);
extern u64 kvm_nvhe_sym(id_aa64isar0_el1_sys_val);
extern u64 kvm_nvhe_sym(id_aa64isar1_el1_sys_val);
extern u64 kvm_nvhe_sym(id_aa64isar2_el1_sys_val);
extern u64 kvm_nvhe_sym(id_aa64mmfr0_el1_sys_val);
extern u64 kvm_nvhe_sym(id_aa64mmfr1_el1_sys_val);
extern u64 kvm_nvhe_sym(id_aa64mmfr2_el1_sys_val);
extern u64 kvm_nvhe_sym(id_aa64smfr0_el1_sys_val);

extern unsigned long kvm_nvhe_sym(__icache_flags);
extern unsigned int kvm_nvhe_sym(kvm_arm_vmid_bits);

#endif /* __ARM64_KVM_HYP_H__ */