/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#ifndef __ARM_KVM_ASM_H__
#define __ARM_KVM_ASM_H__

#include <asm/virt.h>

#define	VCPU_WORKAROUND_2_FLAG_SHIFT	0
#define	VCPU_WORKAROUND_2_FLAG		(_AC(1, UL) << VCPU_WORKAROUND_2_FLAG_SHIFT)

#define ARM_EXIT_WITH_SERROR_BIT  31
#define ARM_EXCEPTION_CODE(x)	  ((x) & ~(1U << ARM_EXIT_WITH_SERROR_BIT))
#define ARM_EXCEPTION_IS_TRAP(x)  (ARM_EXCEPTION_CODE((x)) == ARM_EXCEPTION_TRAP)
#define ARM_SERROR_PENDING(x)	  !!((x) & (1U << ARM_EXIT_WITH_SERROR_BIT))
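/*
 * For example, an exit code of
 * (ARM_EXCEPTION_TRAP | (1U << ARM_EXIT_WITH_SERROR_BIT)) decodes as
 * ARM_EXCEPTION_CODE() == ARM_EXCEPTION_TRAP with ARM_SERROR_PENDING()
 * true, i.e. a trap exit with an SError pending on top of it.
 */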

#define ARM_EXCEPTION_IRQ	  0
#define ARM_EXCEPTION_EL1_SERROR  1
#define ARM_EXCEPTION_TRAP	  2
#define ARM_EXCEPTION_IL	  3
/* The hyp-stub will return this for any kvm_call_hyp() call */
#define ARM_EXCEPTION_HYP_GONE	  HVC_STUB_ERR

#define kvm_arm_exception_type					\
	{ARM_EXCEPTION_IRQ,		"IRQ"		},	\
	{ARM_EXCEPTION_EL1_SERROR,	"SERROR"	},	\
	{ARM_EXCEPTION_TRAP,		"TRAP"		},	\
	{ARM_EXCEPTION_HYP_GONE,	"HYP_GONE"	}

/*
 * Size of the HYP vectors preamble. kvm_patch_vector_branch() generates code
 * that jumps over this.
 */
#define KVM_VECTOR_PREAMBLE	(2 * AARCH64_INSN_SIZE)

#define __SMCCC_WORKAROUND_1_SMC_SZ 36

#ifndef __ASSEMBLY__

#include <linux/mm.h>

/*
 * Translate the name of a symbol defined in the nVHE hyp object to the name
 * seen by kernel proper. All nVHE symbols are prefixed by the build system
 * to avoid clashes with the VHE variants.
 */
#define kvm_nvhe_sym(sym)	__kvm_nvhe_##sym
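/*
 * For example (with a hypothetical hyp symbol named "foo"): kvm_nvhe_sym(foo)
 * expands to __kvm_nvhe_foo, which is the name kernel proper actually links
 * against for the nVHE copy of foo.
 */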

#define DECLARE_KVM_VHE_SYM(sym)	extern char sym[]
#define DECLARE_KVM_NVHE_SYM(sym)	extern char kvm_nvhe_sym(sym)[]

/*
 * Define a pair of symbols sharing the same name, one defined in the VHE
 * and the other in the nVHE hyp implementation.
 */
#define DECLARE_KVM_HYP_SYM(sym)		\
	DECLARE_KVM_VHE_SYM(sym);		\
	DECLARE_KVM_NVHE_SYM(sym)

#define CHOOSE_VHE_SYM(sym)	sym
#define CHOOSE_NVHE_SYM(sym)	kvm_nvhe_sym(sym)

#ifndef __KVM_NVHE_HYPERVISOR__
/*
 * BIG FAT WARNINGS:
 *
 * - Don't be tempted to change the following is_kernel_in_hyp_mode()
 *   to has_vhe(). has_vhe() is implemented as a *final* capability,
 *   while this is used early at boot time, when the capabilities are
 *   not final yet....
 *
 * - Don't let the nVHE hypervisor have access to this, as it will
 *   pick the *wrong* symbol (yes, it runs at EL2...).
 */
#define CHOOSE_HYP_SYM(sym)	(is_kernel_in_hyp_mode() ? CHOOSE_VHE_SYM(sym) \
					   : CHOOSE_NVHE_SYM(sym))
#else
/* The nVHE hypervisor shouldn't even try to access anything */
extern void *__nvhe_undefined_symbol;
#define CHOOSE_HYP_SYM(sym)	__nvhe_undefined_symbol
#endif

/* Translate a kernel address @ptr into its equivalent linear mapping */
#define kvm_ksym_ref(ptr)						\
	({								\
		void *val = (ptr);					\
		if (!is_kernel_in_hyp_mode())				\
			val = lm_alias((ptr));				\
		val;							\
	 })
#define kvm_ksym_ref_nvhe(sym)	kvm_ksym_ref(kvm_nvhe_sym(sym))
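/*
 * Illustrative use (a sketch, not a call site defined in this file): a caller
 * that needs the address of the hyp vectors can do
 *
 *	void *vectors = kvm_ksym_ref(__kvm_hyp_vector);
 *
 * which yields the linear-map alias on non-VHE systems, and the kernel VA
 * itself when running VHE (where the kernel is already at EL2).
 */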

struct kvm;
struct kvm_vcpu;
struct kvm_s2_mmu;

DECLARE_KVM_NVHE_SYM(__kvm_hyp_init);
DECLARE_KVM_HYP_SYM(__kvm_hyp_vector);
#define __kvm_hyp_init		CHOOSE_NVHE_SYM(__kvm_hyp_init)
#define __kvm_hyp_vector	CHOOSE_HYP_SYM(__kvm_hyp_vector)

#ifdef CONFIG_KVM_INDIRECT_VECTORS
extern atomic_t arm64_el2_vector_last_slot;
DECLARE_KVM_HYP_SYM(__bp_harden_hyp_vecs);
#define __bp_harden_hyp_vecs	CHOOSE_HYP_SYM(__bp_harden_hyp_vecs)
#endif

extern void __kvm_flush_vm_context(void);
extern void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu, phys_addr_t ipa,
				     int level);
extern void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu);
extern void __kvm_tlb_flush_local_vmid(struct kvm_s2_mmu *mmu);

extern void __kvm_timer_set_cntvoff(u64 cntvoff);

extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);

extern void __kvm_enable_ssbs(void);

extern u64 __vgic_v3_get_ich_vtr_el2(void);
extern u64 __vgic_v3_read_vmcr(void);
extern void __vgic_v3_write_vmcr(u32 vmcr);
extern void __vgic_v3_init_lrs(void);

extern u32 __kvm_get_mdcr_el2(void);

extern char __smccc_workaround_1_smc[__SMCCC_WORKAROUND_1_SMC_SZ];

/*
 * Obtain the PC-relative address of a kernel symbol
 * s: symbol
 *
 * The goal of this macro is to return a symbol's address based on a
 * PC-relative computation, as opposed to loading the VA from a
 * constant pool or something similar. This works well for HYP, as an
 * absolute VA is guaranteed to be wrong. Only use this if trying to
 * obtain the address of a symbol (i.e. not something you obtained by
 * following a pointer).
 */
#define hyp_symbol_addr(s)						\
	({								\
		typeof(s) *addr;					\
		asm("adrp	%0, %1\n"				\
		    "add	%0, %0, :lo12:%1\n"			\
		    : "=r" (addr) : "S" (&s));				\
		addr;							\
	})
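/*
 * Illustrative use (a sketch): from hyp code, take the address of a symbol
 * declared in this file rather than dereferencing a kernel VA, e.g.
 *
 *	void *wa_smc = hyp_symbol_addr(__smccc_workaround_1_smc);
 *
 * The adrp/add pair keeps the computation PC-relative, so the result is
 * correct under whichever mapping (kernel or hyp) the code runs.
 */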

/*
 * Home-grown __this_cpu_{ptr,read} variants that always work at HYP,
 * provided that sym is really a *symbol* and not a pointer obtained from
 * a data structure. As for SHIFT_PERCPU_PTR(), the creative casting keeps
 * sparse quiet.
 */
#define __hyp_this_cpu_ptr(sym)						\
	({								\
		void *__ptr;						\
		__verify_pcpu_ptr(&sym);				\
		__ptr = hyp_symbol_addr(sym);				\
		__ptr += read_sysreg(tpidr_el2);			\
		(typeof(sym) __kernel __force *)__ptr;			\
	 })

#define __hyp_this_cpu_read(sym)					\
	({								\
		*__hyp_this_cpu_ptr(sym);				\
	 })
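/*
 * Illustrative use (a sketch; kvm_host_data and its host_ctxt member live in
 * the KVM host code and are named here only as an example):
 *
 *	struct kvm_cpu_context *host_ctxt;
 *
 *	host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
 *
 * The helper resolves the symbol PC-relatively and adds this CPU's
 * tpidr_el2 offset, so it works wherever hyp code runs.
 */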

#define __KVM_EXTABLE(from, to)						\
	"	.pushsection	__kvm_ex_table, \"a\"\n"		\
	"	.align		3\n"					\
	"	.long		(" #from " - .), (" #to " - .)\n"	\
	"	.popsection\n"


#define __kvm_at(at_op, addr)						\
( {									\
	int __kvm_at_err = 0;						\
	u64 spsr, elr;							\
	asm volatile(							\
	"	mrs	%1, spsr_el2\n"					\
	"	mrs	%2, elr_el2\n"					\
	"1:	at	"at_op", %3\n"					\
	"	isb\n"							\
	"	b	9f\n"						\
	"2:	msr	spsr_el2, %1\n"					\
	"	msr	elr_el2, %2\n"					\
	"	mov	%w0, %4\n"					\
	"9:\n"								\
	__KVM_EXTABLE(1b, 2b)						\
	: "+r" (__kvm_at_err), "=&r" (spsr), "=&r" (elr)		\
	: "r" (addr), "i" (-EFAULT));					\
	__kvm_at_err;							\
} )
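/*
 * Illustrative use (a sketch; far and par stand in for local variables):
 * perform a stage-1 EL1 read translation for a faulting address and fall
 * back gracefully if the AT instruction itself takes an unexpected exception:
 *
 *	if (!__kvm_at("s1e1r", far))
 *		par = read_sysreg(par_el1);
 *
 * A non-zero return (-EFAULT) means the __kvm_ex_table fixup path ran.
 */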


#else /* __ASSEMBLY__ */

.macro hyp_adr_this_cpu reg, sym, tmp
	adr_l	\reg, \sym
	mrs	\tmp, tpidr_el2
	add	\reg, \reg, \tmp
.endm

.macro hyp_ldr_this_cpu reg, sym, tmp
	adr_l	\reg, \sym
	mrs	\tmp, tpidr_el2
	ldr	\reg, [\reg, \tmp]
.endm

.macro get_host_ctxt reg, tmp
	hyp_adr_this_cpu \reg, kvm_host_data, \tmp
	add	\reg, \reg, #HOST_DATA_CONTEXT
.endm

.macro get_vcpu_ptr vcpu, ctxt
	get_host_ctxt \ctxt, \vcpu
	ldr	\vcpu, [\ctxt, #HOST_CONTEXT_VCPU]
.endm
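/*
 * Illustrative use (a sketch): from an EL2 exception vector where x0 and x1
 * are free scratch registers, load the current vcpu pointer with
 *
 *	get_vcpu_ptr	x1, x0
 *
 * i.e. per-CPU kvm_host_data plus HOST_DATA_CONTEXT, followed by a load of
 * the HOST_CONTEXT_VCPU slot.
 */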

/*
 * KVM extable for unexpected exceptions.
 * Same format as _asm_extable, but output to a different section so that
 * it can be mapped to EL2. The KVM version is not sorted. The caller must
 * ensure that x18 holds the hypervisor value (so that any Shadow-Call-Stack
 * instrumented code can write to it) and that SPSR_EL2 and ELR_EL2 are
 * restored by the fixup.
 */
.macro	_kvm_extable, from, to
	.pushsection	__kvm_ex_table, "a"
	.align		3
	.long		(\from - .), (\to - .)
	.popsection
.endm
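/*
 * Illustrative use (a sketch; the local labels are chosen only for the
 * example): cover a potentially-faulting instruction so an unexpected
 * exception lands on a fixup label instead of being fatal:
 *
 *	1:	ldr	x0, [x2]	// may fault unexpectedly
 *		...
 *	2:	// fixup: restore SPSR_EL2/ELR_EL2, then report the failure
 *		...
 *		_kvm_extable	1b, 2b
 */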

#endif

#endif /* __ARM_KVM_ASM_H__ */