/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#ifndef __ARM64_KVM_HYP_H__
#define __ARM64_KVM_HYP_H__

#include <linux/compiler.h>
#include <linux/kvm_host.h>
#include <asm/alternative.h>
#include <asm/sysreg.h>

#define __hyp_text __section(.hyp.text) notrace __noscs

#define read_sysreg_elx(r,nvh,vh)					\
	({								\
		u64 reg;						\
		asm volatile(ALTERNATIVE(__mrs_s("%0", r##nvh),		\
					 __mrs_s("%0", r##vh),		\
					 ARM64_HAS_VIRT_HOST_EXTN)	\
			     : "=r" (reg));				\
		reg;							\
	})

#define write_sysreg_elx(v,r,nvh,vh)					\
	do {								\
		u64 __val = (u64)(v);					\
		asm volatile(ALTERNATIVE(__msr_s(r##nvh, "%x0"),	\
					 __msr_s(r##vh, "%x0"),		\
					 ARM64_HAS_VIRT_HOST_EXTN)	\
					 : : "rZ" (__val));		\
	} while (0)

/*
 * Unified accessors for registers that have a different encoding
 * between VHE and non-VHE. They must be specified without their "ELx"
 * encoding, but with the SYS_ prefix, as defined in asm/sysreg.h.
 */

#define read_sysreg_el0(r)	read_sysreg_elx(r, _EL0, _EL02)
#define write_sysreg_el0(v,r)	write_sysreg_elx(v, r, _EL0, _EL02)
#define read_sysreg_el1(r)	read_sysreg_elx(r, _EL1, _EL12)
#define write_sysreg_el1(v,r)	write_sysreg_elx(v, r, _EL1, _EL12)
#define read_sysreg_el2(r)	read_sysreg_elx(r, _EL2, _EL1)
#define write_sysreg_el2(v,r)	write_sysreg_elx(v, r, _EL2, _EL1)

/*
 * Without an __arch_swab32(), we fall back to ___constant_swab32(), but the
 * static inline can allow the compiler to out-of-line this. KVM always wants
 * the macro version as it's always inlined.
 */
#define __kvm_swab32(x) ___constant_swab32(x)

int __vgic_v2_perform_cpuif_access(struct kvm_vcpu *vcpu);

void __vgic_v3_save_state(struct vgic_v3_cpu_if *cpu_if);
void __vgic_v3_restore_state(struct vgic_v3_cpu_if *cpu_if);
void __vgic_v3_activate_traps(struct vgic_v3_cpu_if *cpu_if);
void __vgic_v3_deactivate_traps(struct vgic_v3_cpu_if *cpu_if);
void __vgic_v3_save_aprs(struct vgic_v3_cpu_if *cpu_if);
void __vgic_v3_restore_aprs(struct vgic_v3_cpu_if *cpu_if);
int __vgic_v3_perform_cpuif_access(struct kvm_vcpu *vcpu);

void __timer_enable_traps(struct kvm_vcpu *vcpu);
void __timer_disable_traps(struct kvm_vcpu *vcpu);

void __sysreg_save_state_nvhe(struct kvm_cpu_context *ctxt);
void __sysreg_restore_state_nvhe(struct kvm_cpu_context *ctxt);
void sysreg_save_host_state_vhe(struct kvm_cpu_context *ctxt);
void sysreg_restore_host_state_vhe(struct kvm_cpu_context *ctxt);
void sysreg_save_guest_state_vhe(struct kvm_cpu_context *ctxt);
void sysreg_restore_guest_state_vhe(struct kvm_cpu_context *ctxt);
void __sysreg32_save_state(struct kvm_vcpu *vcpu);
void __sysreg32_restore_state(struct kvm_vcpu *vcpu);

void __debug_switch_to_guest(struct kvm_vcpu *vcpu);
void __debug_switch_to_host(struct kvm_vcpu *vcpu);

void __fpsimd_save_state(struct user_fpsimd_state *fp_regs);
void __fpsimd_restore_state(struct user_fpsimd_state *fp_regs);

void activate_traps_vhe_load(struct kvm_vcpu *vcpu);
void deactivate_traps_vhe_put(void);

u64 __guest_enter(struct kvm_vcpu *vcpu, struct kvm_cpu_context *host_ctxt);
void __noreturn __hyp_do_panic(unsigned long, ...);

#endif /* __ARM64_KVM_HYP_H__ */
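
/*
 * Illustrative usage sketch, not part of the original header: how hyp code
 * typically combines __hyp_text with the unified EL1 accessors declared
 * above. The helper name below is hypothetical; the real callers are the
 * hyp sysreg save/restore and switch routines. Kept under #if 0 so it is
 * never compiled.
 */
#if 0
static void __hyp_text __example_save_restore_sctlr(struct kvm_cpu_context *ctxt)
{
	/*
	 * read_sysreg_el1(SYS_SCTLR) accesses SCTLR_EL1 on non-VHE and
	 * SCTLR_EL12 on VHE, patched at runtime via ALTERNATIVE().
	 */
	ctxt->sys_regs[SCTLR_EL1] = read_sysreg_el1(SYS_SCTLR);
	write_sysreg_el1(ctxt->sys_regs[SCTLR_EL1], SYS_SCTLR);
}
#endif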