xref: /openbmc/linux/arch/arm64/include/asm/kvm_hyp.h (revision d2574c33)
/*
 * Copyright (C) 2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef __ARM64_KVM_HYP_H__
#define __ARM64_KVM_HYP_H__

#include <linux/compiler.h>
#include <linux/kvm_host.h>
#include <asm/alternative.h>
#include <asm/kvm_mmu.h>
#include <asm/sysreg.h>

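/*
 * __hyp_text places a function in the .hyp.text section, which is
 * mapped at EL2 so the code can run in hyp context on non-VHE
 * systems; notrace keeps ftrace out, since the tracing machinery is
 * not safe to call from hyp context.
 */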
#define __hyp_text __section(.hyp.text) notrace

#define read_sysreg_elx(r,nvh,vh)					\
	({								\
		u64 reg;						\
		asm volatile(ALTERNATIVE("mrs %0, " __stringify(r##nvh),\
					 "mrs_s %0, " __stringify(r##vh),\
					 ARM64_HAS_VIRT_HOST_EXTN)	\
			     : "=r" (reg));				\
		reg;							\
	})

#define write_sysreg_elx(v,r,nvh,vh)					\
	do {								\
		u64 __val = (u64)(v);					\
		asm volatile(ALTERNATIVE("msr " __stringify(r##nvh) ", %x0",\
					 "msr_s " __stringify(r##vh) ", %x0",\
					 ARM64_HAS_VIRT_HOST_EXTN)	\
					 : : "rZ" (__val));		\
	} while (0)

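/*
 * Note on the constraints above: "rZ" lets the compiler pick the zero
 * register (xzr) when the value is a compile-time zero, and %x0
 * forces the 64-bit (x-register) operand name in the msr instruction.
 */
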
/*
 * Unified accessors for registers that have a different encoding
 * between VHE and non-VHE. They must be specified without their "ELx"
 * encoding.
 */
#define read_sysreg_el2(r)						\
	({								\
		u64 reg;						\
		asm volatile(ALTERNATIVE("mrs %0, " __stringify(r##_EL2),\
					 "mrs %0, " __stringify(r##_EL1),\
					 ARM64_HAS_VIRT_HOST_EXTN)	\
			     : "=r" (reg));				\
		reg;							\
	})

#define write_sysreg_el2(v,r)						\
	do {								\
		u64 __val = (u64)(v);					\
		asm volatile(ALTERNATIVE("msr " __stringify(r##_EL2) ", %x0",\
					 "msr " __stringify(r##_EL1) ", %x0",\
					 ARM64_HAS_VIRT_HOST_EXTN)	\
					 : : "rZ" (__val));		\
	} while (0)

#define read_sysreg_el0(r)	read_sysreg_elx(r, _EL0, _EL02)
#define write_sysreg_el0(v,r)	write_sysreg_elx(v, r, _EL0, _EL02)
#define read_sysreg_el1(r)	read_sysreg_elx(r, _EL1, _EL12)
#define write_sysreg_el1(v,r)	write_sysreg_elx(v, r, _EL1, _EL12)

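/*
 * Illustrative usage from hyp code (a sketch; the real call sites
 * live in hyp/sysreg-sr.c and hyp/switch.c): reading the guest's ESR
 * and writing its ELR. On non-VHE these access ESR_EL1/ELR_EL2
 * directly; on VHE they are patched to the ESR_EL12/ELR_EL1
 * encodings so the guest context is reached from EL2.
 *
 *	u64 esr = read_sysreg_el1(esr);
 *	write_sysreg_el2(*vcpu_pc(vcpu), elr);
 */
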
/* The VHE specific system registers and their encoding */
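/* The sys_reg() arguments below are (op0, op1, CRn, CRm, op2). */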
#define sctlr_EL12              sys_reg(3, 5, 1, 0, 0)
#define cpacr_EL12              sys_reg(3, 5, 1, 0, 2)
#define ttbr0_EL12              sys_reg(3, 5, 2, 0, 0)
#define ttbr1_EL12              sys_reg(3, 5, 2, 0, 1)
#define tcr_EL12                sys_reg(3, 5, 2, 0, 2)
#define afsr0_EL12              sys_reg(3, 5, 5, 1, 0)
#define afsr1_EL12              sys_reg(3, 5, 5, 1, 1)
#define esr_EL12                sys_reg(3, 5, 5, 2, 0)
#define far_EL12                sys_reg(3, 5, 6, 0, 0)
#define mair_EL12               sys_reg(3, 5, 10, 2, 0)
#define amair_EL12              sys_reg(3, 5, 10, 3, 0)
#define vbar_EL12               sys_reg(3, 5, 12, 0, 0)
#define contextidr_EL12         sys_reg(3, 5, 13, 0, 1)
#define cntkctl_EL12            sys_reg(3, 5, 14, 1, 0)
#define cntp_tval_EL02          sys_reg(3, 5, 14, 2, 0)
#define cntp_ctl_EL02           sys_reg(3, 5, 14, 2, 1)
#define cntp_cval_EL02          sys_reg(3, 5, 14, 2, 2)
#define cntv_tval_EL02          sys_reg(3, 5, 14, 3, 0)
#define cntv_ctl_EL02           sys_reg(3, 5, 14, 3, 1)
#define cntv_cval_EL02          sys_reg(3, 5, 14, 3, 2)
#define spsr_EL12               sys_reg(3, 5, 4, 0, 0)
#define elr_EL12                sys_reg(3, 5, 4, 0, 1)

/**
 * hyp_alternate_select - Generates patchable code sequences that are
 * used to switch between two implementations of a function, depending
 * on the availability of a feature.
 *
 * @fname: a symbol name that will be defined as a function returning a
 * function pointer whose type will match @orig and @alt
 * @orig: a pointer to the default function, returned by @fname when
 * @cond does not hold
 * @alt: a pointer to the alternate function, returned by @fname when
 * @cond holds
 * @cond: a CPU feature (as described in asm/cpufeature.h)
 */
#define hyp_alternate_select(fname, orig, alt, cond)			\
typeof(orig) * __hyp_text fname(void)					\
{									\
	typeof(alt) *val = orig;					\
	asm volatile(ALTERNATIVE("nop		\n",			\
				 "mov	%0, %1	\n",			\
				 cond)					\
		     : "+r" (val) : "r" (alt));				\
	return val;							\
}

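/*
 * Illustrative use (patterned on arch/arm64/kvm/hyp/tlb.c): selecting
 * a VHE or non-VHE TLB helper once at patch time. @fname() returns
 * the selected function pointer, so call sites invoke it as
 * __tlb_switch_to_guest()(kvm, ...):
 *
 *	static hyp_alternate_select(__tlb_switch_to_guest,
 *				    __tlb_switch_to_guest_nvhe,
 *				    __tlb_switch_to_guest_vhe,
 *				    ARM64_HAS_VIRT_HOST_EXTN);
 */
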
int __vgic_v2_perform_cpuif_access(struct kvm_vcpu *vcpu);

void __vgic_v3_save_state(struct kvm_vcpu *vcpu);
void __vgic_v3_restore_state(struct kvm_vcpu *vcpu);
void __vgic_v3_activate_traps(struct kvm_vcpu *vcpu);
void __vgic_v3_deactivate_traps(struct kvm_vcpu *vcpu);
void __vgic_v3_save_aprs(struct kvm_vcpu *vcpu);
void __vgic_v3_restore_aprs(struct kvm_vcpu *vcpu);
int __vgic_v3_perform_cpuif_access(struct kvm_vcpu *vcpu);

void __timer_enable_traps(struct kvm_vcpu *vcpu);
void __timer_disable_traps(struct kvm_vcpu *vcpu);

void __sysreg_save_state_nvhe(struct kvm_cpu_context *ctxt);
void __sysreg_restore_state_nvhe(struct kvm_cpu_context *ctxt);
void sysreg_save_host_state_vhe(struct kvm_cpu_context *ctxt);
void sysreg_restore_host_state_vhe(struct kvm_cpu_context *ctxt);
void sysreg_save_guest_state_vhe(struct kvm_cpu_context *ctxt);
void sysreg_restore_guest_state_vhe(struct kvm_cpu_context *ctxt);
void __sysreg32_save_state(struct kvm_vcpu *vcpu);
void __sysreg32_restore_state(struct kvm_vcpu *vcpu);

void __debug_switch_to_guest(struct kvm_vcpu *vcpu);
void __debug_switch_to_host(struct kvm_vcpu *vcpu);

void __fpsimd_save_state(struct user_fpsimd_state *fp_regs);
void __fpsimd_restore_state(struct user_fpsimd_state *fp_regs);
bool __fpsimd_enabled(void);

void activate_traps_vhe_load(struct kvm_vcpu *vcpu);
void deactivate_traps_vhe_put(void);

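/* Implemented in assembly (see hyp/entry.S and hyp/hyp-entry.S). */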
u64 __guest_enter(struct kvm_vcpu *vcpu, struct kvm_cpu_context *host_ctxt);
void __noreturn __hyp_do_panic(unsigned long, ...);

/*
 * Must be called from hyp code running at EL2 with an updated VTTBR
 * and interrupts disabled.
 */
static __always_inline void __hyp_text __load_guest_stage2(struct kvm *kvm)
{
	write_sysreg(kvm->arch.vtcr, vtcr_el2);
	write_sysreg(kvm_get_vttbr(kvm), vttbr_el2);

	/*
	 * ARM erratum 1165522 requires the actual execution of the above
	 * before we can switch to the EL1/EL0 translation regime used by
	 * the guest.
	 */
	asm(ALTERNATIVE("nop", "isb", ARM64_WORKAROUND_1165522));
}

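/*
 * Illustrative call site (a sketch patterned on __activate_vm() in
 * hyp/switch.c), running on the world-switch path with interrupts
 * already disabled:
 *
 *	static void __hyp_text __activate_vm(struct kvm *kvm)
 *	{
 *		__load_guest_stage2(kvm);
 *	}
 */
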
#endif /* __ARM64_KVM_HYP_H__ */