/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#ifndef __ARM_KVM_ASM_H__
#define __ARM_KVM_ASM_H__

#include <asm/virt.h>

#define ARM_EXIT_WITH_SERROR_BIT  31
#define ARM_EXCEPTION_CODE(x)	  ((x) & ~(1U << ARM_EXIT_WITH_SERROR_BIT))
#define ARM_EXCEPTION_IS_TRAP(x)  (ARM_EXCEPTION_CODE((x)) == ARM_EXCEPTION_TRAP)
#define ARM_SERROR_PENDING(x)	  !!((x) & (1U << ARM_EXIT_WITH_SERROR_BIT))

#define ARM_EXCEPTION_IRQ	  0
#define ARM_EXCEPTION_EL1_SERROR  1
#define ARM_EXCEPTION_TRAP	  2
#define ARM_EXCEPTION_IL	  3
/* The hyp-stub will return this for any kvm_call_hyp() call */
#define ARM_EXCEPTION_HYP_GONE	  HVC_STUB_ERR

#define kvm_arm_exception_type					\
	{ARM_EXCEPTION_IRQ,		"IRQ"		},	\
	{ARM_EXCEPTION_EL1_SERROR,	"SERROR"	},	\
	{ARM_EXCEPTION_TRAP,		"TRAP"		},	\
	{ARM_EXCEPTION_HYP_GONE,	"HYP_GONE"	}
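
/*
 * Illustrative note (not part of this header): the table above is shaped
 * for trace-event pretty-printing, e.g. something along the lines of
 *
 *	__print_symbolic(exit_code, kvm_arm_exception_type)
 *
 * which renders a raw exit code as "IRQ", "SERROR", "TRAP" or
 * "HYP_GONE" in trace output.
 */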

/*
 * Size of the HYP vectors preamble. kvm_patch_vector_branch() generates code
 * that jumps over this.
 */
#define KVM_VECTOR_PREAMBLE	(2 * AARCH64_INSN_SIZE)

#define __SMCCC_WORKAROUND_1_SMC_SZ 36

#ifndef __ASSEMBLY__

#include <linux/mm.h>

/*
 * Translate name of a symbol defined in nVHE hyp to the name seen
 * by kernel proper. All nVHE symbols are prefixed by the build system
 * to avoid clashes with the VHE variants.
 */
#define kvm_nvhe_sym(sym)	__kvm_nvhe_##sym
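
/*
 * For example, kvm_nvhe_sym(__kvm_vcpu_run) expands to
 * __kvm_nvhe___kvm_vcpu_run, which is the name the linker sees for the
 * nVHE build of __kvm_vcpu_run.
 */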

#define DECLARE_KVM_VHE_SYM(sym)	extern char sym[]
#define DECLARE_KVM_NVHE_SYM(sym)	extern char kvm_nvhe_sym(sym)[]

/*
 * Declare a pair of symbols sharing the same name, one defined in the
 * VHE and the other in the nVHE hyp implementation.
 */
#define DECLARE_KVM_HYP_SYM(sym)		\
	DECLARE_KVM_VHE_SYM(sym);		\
	DECLARE_KVM_NVHE_SYM(sym)

#define CHOOSE_VHE_SYM(sym)	sym
#define CHOOSE_NVHE_SYM(sym)	kvm_nvhe_sym(sym)
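
/*
 * Example of the expansions above (a sketch, "foo" is a placeholder):
 * DECLARE_KVM_HYP_SYM(foo) declares both "extern char foo[];" and
 * "extern char __kvm_nvhe_foo[];", and CHOOSE_VHE_SYM(foo) /
 * CHOOSE_NVHE_SYM(foo) name one or the other.
 */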

#ifndef __KVM_NVHE_HYPERVISOR__
/*
 * BIG FAT WARNINGS:
 *
 * - Don't be tempted to change the following is_kernel_in_hyp_mode()
 *   to has_vhe(). has_vhe() is implemented as a *final* capability,
 *   while this is used early at boot time, when the capabilities are
 *   not final yet....
 *
 * - Don't let the nVHE hypervisor have access to this, as it will
 *   pick the *wrong* symbol (yes, it runs at EL2...).
 */
#define CHOOSE_HYP_SYM(sym)	(is_kernel_in_hyp_mode() ? CHOOSE_VHE_SYM(sym) \
					   : CHOOSE_NVHE_SYM(sym))
#else
/* The nVHE hypervisor shouldn't even try to access anything */
extern void *__nvhe_undefined_symbol;
#define CHOOSE_HYP_SYM(sym)	__nvhe_undefined_symbol
#endif

/* Translate a kernel address @ptr into its equivalent linear mapping */
#define kvm_ksym_ref(ptr)						\
	({								\
		void *val = (ptr);					\
		if (!is_kernel_in_hyp_mode())				\
			val = lm_alias((ptr));				\
		val;							\
	 })
#define kvm_ksym_ref_nvhe(sym)	kvm_ksym_ref(kvm_nvhe_sym(sym))
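
/*
 * Usage sketch (illustrative; actual call sites vary by tree): non-VHE
 * code passes hyp symbol addresses through the linear alias, e.g.
 *
 *	void *vector = kvm_ksym_ref(__kvm_hyp_vector);
 *
 * On VHE, the kernel address is returned unchanged.
 */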

struct kvm;
struct kvm_vcpu;
struct kvm_s2_mmu;

DECLARE_KVM_NVHE_SYM(__kvm_hyp_init);
DECLARE_KVM_HYP_SYM(__kvm_hyp_vector);
#define __kvm_hyp_init		CHOOSE_NVHE_SYM(__kvm_hyp_init)
#define __kvm_hyp_vector	CHOOSE_HYP_SYM(__kvm_hyp_vector)
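
/*
 * Note: after the #defines above, a plain mention of __kvm_hyp_vector
 * in kernel code transparently resolves to the VHE or nVHE symbol via
 * CHOOSE_HYP_SYM(), so callers don't pick a variant themselves.
 */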

extern atomic_t arm64_el2_vector_last_slot;
DECLARE_KVM_HYP_SYM(__bp_harden_hyp_vecs);
#define __bp_harden_hyp_vecs	CHOOSE_HYP_SYM(__bp_harden_hyp_vecs)

extern void __kvm_flush_vm_context(void);
extern void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu, phys_addr_t ipa,
				     int level);
extern void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu);
extern void __kvm_tlb_flush_local_vmid(struct kvm_s2_mmu *mmu);

extern void __kvm_timer_set_cntvoff(u64 cntvoff);

extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);

extern void __kvm_enable_ssbs(void);

extern u64 __vgic_v3_get_ich_vtr_el2(void);
extern u64 __vgic_v3_read_vmcr(void);
extern void __vgic_v3_write_vmcr(u32 vmcr);
extern void __vgic_v3_init_lrs(void);

extern u32 __kvm_get_mdcr_el2(void);

extern char __smccc_workaround_1_smc[__SMCCC_WORKAROUND_1_SMC_SZ];

/*
 * Obtain the PC-relative address of a kernel symbol.
 *
 * s: symbol
 *
 * The goal of this macro is to return a symbol's address based on a
 * PC-relative computation, as opposed to loading the VA from a
 * constant pool or something similar. This works well for HYP, as an
 * absolute VA is guaranteed to be wrong. Only use this if trying to
 * obtain the address of a symbol (i.e. not something you obtained by
 * following a pointer).
 */
#define hyp_symbol_addr(s)						\
	({								\
		typeof(s) *addr;					\
		asm("adrp	%0, %1\n"				\
		    "add	%0, %0, :lo12:%1\n"			\
		    : "=r" (addr) : "S" (&s));				\
		addr;							\
	})
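
/*
 * Usage sketch (illustrative; the symbol name below is a placeholder,
 * not a real variable): code that can run at EL2 takes addresses
 * through this helper instead of a plain '&', e.g.
 *
 *	unsigned long *p = hyp_symbol_addr(some_el2_visible_symbol);
 *
 * so the address is computed relative to where the code actually runs.
 */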

/*
 * Home-grown __this_cpu_{ptr,read} variants that always work at HYP,
 * provided that sym is really a *symbol* and not a pointer obtained from
 * a data structure. As for SHIFT_PERCPU_PTR(), the creative casting keeps
 * sparse quiet.
 */
#define __hyp_this_cpu_ptr(sym)						\
	({								\
		void *__ptr;						\
		__verify_pcpu_ptr(&sym);				\
		__ptr = hyp_symbol_addr(sym);				\
		__ptr += read_sysreg(tpidr_el2);			\
		(typeof(sym) __kernel __force *)__ptr;			\
	 })

#define __hyp_this_cpu_read(sym)					\
	({								\
		*__hyp_this_cpu_ptr(sym);				\
	 })
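
/*
 * Usage sketch (illustrative): with a per-CPU variable such as the
 * kvm_host_data instance referenced by the assembly helpers below,
 * hyp code can do
 *
 *	struct kvm_host_data *host = __hyp_this_cpu_ptr(kvm_host_data);
 *
 * which adds this CPU's tpidr_el2 offset to the symbol's PC-relative
 * address.
 */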

#define __KVM_EXTABLE(from, to)						\
	"	.pushsection	__kvm_ex_table, \"a\"\n"		\
	"	.align		3\n"					\
	"	.long		(" #from " - .), (" #to " - .)\n"	\
	"	.popsection\n"


#define __kvm_at(at_op, addr)						\
( {									\
	int __kvm_at_err = 0;						\
	u64 spsr, elr;							\
	asm volatile(							\
	"	mrs	%1, spsr_el2\n"					\
	"	mrs	%2, elr_el2\n"					\
	"1:	at	"at_op", %3\n"					\
	"	isb\n"							\
	"	b	9f\n"						\
	"2:	msr	spsr_el2, %1\n"					\
	"	msr	elr_el2, %2\n"					\
	"	mov	%w0, %4\n"					\
	"9:\n"								\
	__KVM_EXTABLE(1b, 2b)						\
	: "+r" (__kvm_at_err), "=&r" (spsr), "=&r" (elr)		\
	: "r" (addr), "i" (-EFAULT));					\
	__kvm_at_err;							\
} )
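
/*
 * Usage sketch (hedged; mirrors typical hyp fault probing): issue an
 * AT instruction that must not be fatal if it faults, e.g.
 *
 *	if (__kvm_at("s1e1r", far))
 *		return false;	// the AT itself took an exception
 *
 * __kvm_at() returns 0 on success and -EFAULT if the AT instruction
 * faulted; the extable fixup restores the SPSR_EL2/ELR_EL2 values
 * clobbered by the unexpected exception.
 */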


#else /* __ASSEMBLY__ */

.macro hyp_adr_this_cpu reg, sym, tmp
	adr_l	\reg, \sym
	mrs	\tmp, tpidr_el2
	add	\reg, \reg, \tmp
.endm

.macro hyp_ldr_this_cpu reg, sym, tmp
	adr_l	\reg, \sym
	mrs	\tmp, tpidr_el2
	ldr	\reg, [\reg, \tmp]
.endm

.macro get_host_ctxt reg, tmp
	hyp_adr_this_cpu \reg, kvm_host_data, \tmp
	add	\reg, \reg, #HOST_DATA_CONTEXT
.endm

.macro get_vcpu_ptr vcpu, ctxt
	get_host_ctxt \ctxt, \vcpu
	ldr	\vcpu, [\ctxt, #HOST_CONTEXT_VCPU]
.endm
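
/*
 * Usage sketch (illustrative): a hyp exception path can recover the
 * running vcpu with one scratch register, e.g.
 *
 *	get_vcpu_ptr	x1, x0		// x1 = vcpu, x0 clobbered
 */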

/*
 * KVM extable for unexpected exceptions.
 * In the same format as _asm_extable, but output to a different section
 * so that it can be mapped to EL2. The KVM version is not sorted. The
 * caller must ensure that x18 holds the hypervisor value (so that any
 * Shadow-Call-Stack instrumented code can write to it) and that
 * SPSR_EL2 and ELR_EL2 are restored by the fixup.
 */
.macro	_kvm_extable, from, to
	.pushsection	__kvm_ex_table, "a"
	.align		3
	.long		(\from - .), (\to - .)
	.popsection
.endm
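
/*
 * Usage sketch (illustrative): tag an instruction that may fault
 * unexpectedly with its local fixup label, e.g.
 *
 *	1:	ldr	x0, [x2]	// may fault at EL2
 *		...
 *	2:	// fixup code for an unexpected exception at 1:
 *	_kvm_extable	1b, 2b
 */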

#endif

#endif /* __ARM_KVM_ASM_H__ */